/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"
/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
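/*
 * For example, MASK(3) == (1UL << 3) - 1 == 0x7, i.e. the three
 * low-order bits set.  PFM_MASK == MASK(38) thus covers the 38
 * defined bits of the current frame marker (CFM).
 */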
#define PTRACE_DEBUG 0

#if PTRACE_DEBUG
# define dprintk(format...) printk(format)
# define inline
#else
# define dprintk(format...)
#endif
/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
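/*
 * This works because pt->cr_ifs has the "valid" bit (bit 63) set for
 * an interruption frame, making the value negative when viewed as a
 * signed long, whereas syscall entry leaves that bit clear.
 */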
/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct_pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
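/*
 * For example, if r8's slot in struct pt_regs maps to unat bit 16,
 * then GET_BITS(8, 11, unat) rotates the unat value right by
 * (16 - 8) = 8 so that bits 16-19 land at positions 8-11, and then
 * masks everything else off.
 */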
/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct_pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}
#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6
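/*
 * A bundle's 5-bit template field occupies bits 4:0 of the first
 * bundle word; the (w0 >> 1) & 0xf test below drops the low bit
 * (which only distinguishes stop-bit variants), so both MLX
 * templates (0x4 and 0x5) compare equal to IA64_MLX_TEMPLATE (0x2).
 */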
void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}
/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *	user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * |  rnat  |
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 * | slot62 |				| slot62 |
 * +- - - - +				+--------+
 * |  rnat  |				|  rnat  |
 * +- - - - +				+--------+
 * | slot00 |				| slot00 |
 * +- - - - +				+--------+
 *					|  ...   |
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *						  <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
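/*
 * Illustrative example: if the user's backing store starts at slot 5
 * of an rnat-collection group while the kernel's starts at slot 0,
 * then user rnat bit 0 corresponds to kernel rnat bit "shift" = 5,
 * and the user value is assembled from (rnat0 & m) >> shift for the
 * low bits plus (rnat1 & m) << (63 - shift) for the high bits.
 */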
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}
/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
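/*
 * Note: pt->loadrs holds the ar.rsc.loadrs field in its native
 * position (bits 29:16), so (pt->loadrs >> 16) is the size of the
 * dirty partition in bytes and (pt->loadrs >> 19) the corresponding
 * number of 8-byte slots, including interleaved NaT-collection slots.
 */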
/*
 * Synchronize (i.e, write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}
static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		      unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val), 0)
		    != sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}
typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}
/*
 * When a thread is stopped (ptraced), a debugger might change the
 * thread's user stack (change memory directly), and we must avoid
 * having the RSE image stored in the kernel override the user stack
 * (the user-space RSE is newer than the kernel's in that case).  To
 * work around the issue, we copy the kernel RSE to the user RSE
 * before the task is stopped, so the user RSE has updated data.  We
 * then copy the user RSE to the kernel after the task is resumed from
 * the traced stop, and the kernel will use the newer RSE to return to
 * user mode.  TIF_RESTORE_RSE is the flag that indicates we need to
 * synchronize the user RSE back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	tsk_set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}
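/*
 * The test_and_set above makes the sync idempotent: if
 * TIF_RESTORE_RSE was already set, the user RBS has already been
 * brought up to date and there is nothing left to do.
 */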
/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
	tsk_clear_notify_resume(current);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}
/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->signal) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			tsk_set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->signal) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}
static int
thread_matches (struct task_struct *thread, unsigned long addr)
{
	unsigned long thread_rbs_end;
	struct pt_regs *thread_regs;

	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we'll
		 * ignore it.  The net effect is that if ADDR happens
		 * to overlap with the portion of the thread's
		 * register backing store that is currently residing
		 * on the thread's kernel stack, then ptrace() may end
		 * up accessing a stale value.  But if the thread
		 * isn't stopped, that's a problem anyhow, so we're
		 * doing as well as we can...
		 */
		return 0;

	thread_regs = task_pt_regs(thread);
	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
		return 0;

	return 1;	/* looks like we've got a winner */
}
/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}
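/*
 * psr.mfh ("modified fph") is set by hardware whenever f32-f127 are
 * written, so it tells us whether the live FPU state is newer than
 * the copy in task->thread.fph.
 */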
/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}
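/*
 * Setting bit 63 (the "valid" bit) in pt->cr_ifs above is what marks
 * the frame as an interruption frame, so the normal kernel exit-path,
 * rather than the syscall-exit path, restores it.
 */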
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}
static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace:unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		__put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace:unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}
void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}
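/*
 * psr.ss raises a Single Step trap after every instruction, while
 * psr.tb raises a Taken Branch trap instead, which is what the
 * "block" stepping variant above relies on.
 */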
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}
long
arch_ptrace (struct task_struct *child, long request, long addr, long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);
	}

	return ptrace_request(child, request, addr, data);
}
static void
syscall_trace (void)
{
	/*
	 * The 0x80 provides a way for the tracing parent to
	 * distinguish between a syscall stop and SIGTRAP delivery.
	 */
	ptrace_notify(SIGTRAP
		      | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * This isn't the same as continuing with a signal, but it
	 * will do for normal use.  strace only continues with a
	 * signal if the stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
1256 /* "asmlinkage" so the input arguments are preserved... */
1259 syscall_trace_enter (long arg0
, long arg1
, long arg2
, long arg3
,
1260 long arg4
, long arg5
, long arg6
, long arg7
,
1261 struct pt_regs regs
)
1263 if (test_thread_flag(TIF_SYSCALL_TRACE
)
1264 && (current
->ptrace
& PT_PTRACED
))
1267 /* copy user rbs to kernel rbs */
1268 if (test_thread_flag(TIF_RESTORE_RSE
))
1271 if (unlikely(current
->audit_context
)) {
1275 if (IS_IA32_PROCESS(®s
)) {
1277 arch
= AUDIT_ARCH_I386
;
1280 arch
= AUDIT_ARCH_IA64
;
1283 audit_syscall_entry(arch
, syscall
, arg0
, arg1
, arg2
, arg3
);
1288 /* "asmlinkage" so the input arguments are preserved... */
1291 syscall_trace_leave (long arg0
, long arg1
, long arg2
, long arg3
,
1292 long arg4
, long arg5
, long arg6
, long arg7
,
1293 struct pt_regs regs
)
1295 if (unlikely(current
->audit_context
)) {
1296 int success
= AUDITSC_RESULT(regs
.r10
);
1297 long result
= regs
.r8
;
1299 if (success
!= AUDITSC_SUCCESS
)
1301 audit_syscall_exit(success
, result
);
1304 if ((test_thread_flag(TIF_SYSCALL_TRACE
)
1305 || test_thread_flag(TIF_SINGLESTEP
))
1306 && (current
->ptrace
& PT_PTRACED
))
1309 /* copy user rbs to kernel rbs */
1310 if (test_thread_flag(TIF_RESTORE_RSE
))
/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};
static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}
static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data,
					write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data,
				       write_access);
	else
		return access_elf_areg(target, info, addr, data,
				       write_access);
}
void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}
#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}
void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
					      &dst->u.set.kbuf,
					      &dst->u.set.ubuf,
					      &dst->target->thread.fph,
					      ELF_FP_OFFSET(32), -1);
	}
}
static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}
static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
			      kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
			      kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	tsk_set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
			      NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
			      kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
			      kbuf, ubuf);
}
static int
access_uarea(struct task_struct *child, unsigned long addr,
	     unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
	    (addr >= PT_R7 + 8 && addr < PT_B1) ||
	    (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
	    (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon. This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset. The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID. The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_SUPPORT
	extern const struct user_regset_view user_ia32_view;
	if (IS_IA32_PROCESS(task_pt_regs(tsk)))
		return &user_ia32_view;
#endif
	return &user_ia64_view;
}