arch/arm64/kernel/perf_callchain.c
/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

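/*
 * AArch64 frame record as laid out by the AAPCS64 when code is built with
 * frame pointers: x29 points at a {previous fp, lr} pair on the stack.
 */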
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

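	/*
	 * The copy must not fault: callchains are sampled from interrupt/NMI
	 * context, so read the user frame with page faults disabled.
	 */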
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
		      struct perf_callchain_entry *entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

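	/* As above, the copy must not fault in interrupt/NMI context. */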
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void perf_callchain_user(struct perf_callchain_entry *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

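		/*
		 * Only follow frame pointers that look sane (non-NULL and
		 * 16-byte aligned), and stop once the callchain entry limit
		 * is reached.
		 */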
		while (entry->nr < PERF_MAX_STACK_DEPTH &&
		       tail && !((unsigned long)tail & 0xf))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

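		/* AArch32 frame pointers only need 4-byte alignment. */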
		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, frame->pc);
	return 0;
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	frame.fp = regs->regs[29];
	frame.sp = regs->sp;
	frame.pc = regs->pc;
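	/*
	 * When the function graph tracer is active, return addresses on the
	 * stack are patched to return_to_handler; the graph index lets the
	 * unwinder report the original return addresses instead.
	 */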
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = current->curr_ret_stack;
#endif

	walk_stackframe(current, &frame, callchain_trace, entry);
}

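/*
 * Report the guest instruction pointer when the sample hit while a guest was
 * running; otherwise use the interrupted context's PC.
 */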
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

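/*
 * Classify the sample origin (user vs. kernel, host vs. guest) so perf can
 * tag the record in its header.
 */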
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}