// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>
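
/*
 * Branch distance, in instructions, used when a module call site's
 * "lui" is replaced with "b 1f"; the two stub layouts that yield 5
 * vs. 4 instructions are spelled out in the calling-site comment
 * further down.
 */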
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
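
/*
 * For example, for a (hypothetical) kernel text address 0x80100400,
 * INSN_JAL(0x80100400) drops the upper bits of the word address and
 * yields 0x0c040100, i.e. "jal 0x80100400", which can reach anywhere
 * inside the same 256 MB (1 << 28) segment as the call site.
 */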

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
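
/*
 * The long-call sequence patched into module call sites loads _mcount
 * into v1 ($3), matching the "lui v1, ..." / "addiu v1, ..." stub the
 * compiler emits there (see the calling-site comment further down).
 */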
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;
	mm_segment_t old_fs;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;
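
	/*
	 * flush_icache_range() may use protected cache ops that honour
	 * the current user address limit (e.g. under EVA), so switch the
	 * limit to the kernel segment while flushing kernel text.
	 */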
	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
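
/*
 * Like ftrace_modify_code_2(), but write the second word first. That
 * way, when a nopped call site is being re-armed, the second
 * instruction is already valid before the first word flips from the
 * "b 1f" skip to "lui v1, ...", so a CPU racing through the site
 * should never execute a half-patched pair.
 */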
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *					1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *					1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
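
/*
 * INSN_B_1F is a "beq zero, zero" (unconditional branch) encoding:
 * 0x10000000 is the opcode, and the low 16 bits hold the signed word
 * offset, counted from the instruction after the branch. With
 * MCOUNT_OFFSET_INSNS == 4 this gives 0x10000004, branching over the
 * remaining four words of the stub shown above.
 */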

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is needed.
	 */
	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
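
/*
 * Example: "sw ra, 40(sp)" encodes as 0xafbf0028 (opcode 0x2b, rs = sp,
 * rt = ra, offset 0x28). The mask bits chosen above are common to the
 * sw (0x2b) and sd (0x3f) opcodes, so (code & S_RA_SP) == S_RA_SP
 * matches either width of the ra spill.
 */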

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Move ip back over the mcount calling sequence: for a module,
	 * past the "lui v1, hi_16bit_of_mcount" instruction (offset 24);
	 * for the kernel, past the "move at, ra" instruction (offset 16).
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/*
	 * Search backwards through the text until we find a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;

		/*
		 * If we hit a non-store instruction before finding where
		 * ra is stored, then this is a leaf function and it does
		 * not store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);
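
	/*
	 * "code" now holds the matched "s{d,w} ra, offset(sp)" store;
	 * the saved ra therefore lives at fp + offset.
	 */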
	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address at which the return
	 * address of the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly; a non-leaf function saves the return
	 * address in its own stack space, which we cannot hijack
	 * directly, so we need to find the real stack address, which is
	 * what ftrace_get_parent_ra_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of a non-leaf function's return address is passed to
	 * us in $12, and for a leaf function a zero is put into $12; we
	 * handle this in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);

	/*
	 * If getting the stack address of the non-leaf function's ra
	 * fails, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif

	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
				     NULL) == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function
	 * interface.
	 */
	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
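
	/*
	 * For instance, in the kernel case self_ra points just past the
	 * delay slot of "jal _mcount", so stepping back two instructions
	 * lands on the jal itself, the address recorded for this call
	 * site in __mcount_loc.
	 */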

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}

#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */