/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo	= SIGTRAP,
		.si_errno	= 0,
		.si_code	= TRAP_HWBKPT,
		.si_addr	= (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}

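/*
 * Illustrative sketch (not part of the code above): given the si_errno
 * encoding set up by ptrace_hbptriggered(), an AArch32 debugger that
 * receives SIGTRAP with si_code == TRAP_HWBKPT can recover which debug
 * register pair fired roughly as follows. The names (num, slot,
 * is_watchpoint) are hypothetical.
 *
 *	int num = info->si_errno;	// positive: breakpoint, negative: watchpoint
 *	int is_watchpoint = num < 0;
 *	int slot = (abs(num) - 1) >> 1;	// pair index, mirroring
 *					// compat_ptrace_hbp_num_to_idx() below
 */
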
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;

	if (disabled) {
		len = 0;
		type = HW_BREAKPOINT_EMPTY;
	} else {
		err = arch_bp_generic_fields(ctrl, &len, &type);
		if (err)
			return err;

		switch (note_type) {
		case NT_ARM_HW_BREAK:
			if ((type & HW_BREAKPOINT_X) != type)
				return -EINVAL;
			break;
		case NT_ARM_HW_WATCH:
			if ((type & HW_BREAKPOINT_RW) != type)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->disabled	= disabled;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? bp->attr.bp_addr : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							 struct task_struct *tsk,
							 unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

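/*
 * The sizes above correspond to the user-visible struct user_hwdebug_state
 * layout served by hw_break_get()/hw_break_set() below: a u32 resource-info
 * word, a u32 of padding, then one (u64 address, u32 control, u32 pad) tuple
 * per hardware slot. A minimal debugger-side sketch (assumes the tracee is
 * ptrace-stopped and NT_ARM_HW_BREAK from <elf.h>; error handling omitted):
 *
 *	struct user_hwdebug_state hws;
 *	struct iovec iov = { .iov_base = &hws, .iov_len = sizeof(hws) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 *	// hws.dbg_info encodes the debug architecture and slot count,
 *	// hws.dbg_regs[n].addr / .ctrl describe breakpoint slot n.
 */
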
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Pad */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Pad */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	return 0;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return 0;
}

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

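/*
 * Userspace reaches these regsets through the generic regset ptrace calls.
 * A minimal tracer-side sketch for reading the native AArch64 GPRs (assumes
 * the tracee is already ptrace-stopped; error handling omitted):
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	// regs.regs[0..30], regs.sp, regs.pc and regs.pstate now hold the
 *	// tracee's state as exposed by gpr_get() above.
 */
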
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		void *reg;

		switch (idx) {
		case 15:
			reg = (void *)&task_pt_regs(target)->pc;
			break;
		case 16:
			reg = (void *)&task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = (void *)&task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = (void *)&task_pt_regs(target)->regs[idx];
		}

		ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
		if (ret)
			break;
		else
			ubuf += sizeof(compat_ulong_t);
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		void *reg;

		switch (idx) {
		case 15:
			reg = (void *)&newregs.pc;
			break;
		case 16:
			reg = (void *)&newregs.pstate;
			break;
		case 17:
			reg = (void *)&newregs.orig_x0;
			break;
		default:
			reg = (void *)&newregs.regs[idx];
		}

		ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
		if (ret)
			goto out;
		else
			ubuf += sizeof(compat_ulong_t);
	}

	if (valid_user_regs(&newregs.user_regs))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

out:
	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t *)ubuf);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		ret = get_user(fpscr, (compat_ulong_t *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

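/*
 * For reference: this implements the legacy AArch32 "user area" reads, so a
 * 32-bit debugger's request along the lines of
 *
 *	ptrace(PTRACE_PEEKUSER, pid, COMPAT_PT_TEXT_ADDR, &word);
 *
 * (modulo the exact libc calling convention) arrives here with off set to one
 * of the magic offsets handled above, and the result is written back through
 * the supplied user pointer rather than read from the register file.
 */
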
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

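/*
 * Worked example of the mapping above: register numbers 1 and 2 refer to the
 * address and control halves of breakpoint slot 0, so both map to idx 0;
 * numbers 3 and 4 map to idx 1, and so on. Negative (watchpoint) numbers
 * follow the same pairing via abs(), e.g. -1 and -2 also yield idx 0 of the
 * watchpoint array.
 */
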
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case COMPAT_PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case COMPAT_PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
{
	unsigned long saved_reg;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return regs->syscallno;

	if (is_compat_task()) {
		/* AArch32 uses ip (r12) for scratch */
		saved_reg = regs->regs[12];
		regs->regs[12] = dir;
	} else {
		/*
		 * Save X7. X7 is used to denote syscall entry/exit:
		 *   X7 = 0 -> entry, = 1 -> exit
		 */
		saved_reg = regs->regs[7];
		regs->regs[7] = dir;
	}

	if (dir)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	if (is_compat_task())
		regs->regs[12] = saved_reg;
	else
		regs->regs[7] = saved_reg;

	return regs->syscallno;
}
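
/*
 * Tracer-side note (a hedged sketch, not kernel code): under PTRACE_SYSCALL,
 * a debugger can tell entry stops from exit stops by reading the scratch
 * register chosen above, e.g. for a native AArch64 tracee:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	if (regs.regs[7] == 0)
 *		;	// stopped at syscall entry
 *	else
 *		;	// stopped at syscall exit (regs.regs[7] == 1)
 *
 * A compat (AArch32) tracee would be inspected via r12 instead.
 */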