oprofile, x86: Fix nmi-unsafe callgraph support
/**
 * @file backtrace.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author David Smith
 */

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/highmem.h>

#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/stacktrace.h>

static int backtrace_stack(void *data, char *name)
{
	/* Yes, we want all stacks */
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	if ((*depth)--)
		oprofile_add_trace(addr);
}

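/*
 * Callbacks used by dump_trace() below to walk kernel-mode stacks.
 */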
static struct stacktrace_ops backtrace_ops = {
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack,
};

/* from arch/x86/kernel/cpu/perf_event.c: */

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
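/*
 * A plain copy_from_user() may fault and sleep, which is not allowed
 * in NMI context.  __get_user_pages_fast() walks the page tables
 * without taking locks or sleeping, so the copy is "best effort":
 * it stops at the first page that is not currently mapped.
 */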
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
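		/*
		 * Pin the current user page with the lockless fast-GUP
		 * walker; unlike get_user_pages(), it never sleeps.
		 */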
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

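		/* Never copy past the end of the pinned page: */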
		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

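		/*
		 * kmap_atomic() does not sleep either, so mapping the
		 * page for the memcpy() is safe in NMI context as well.
		 */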
		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}

#ifdef CONFIG_COMPAT
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
	/* Also check accessibility of one struct frame_head beyond: */
	struct stack_frame_ia32 bufhead[2];
	struct stack_frame_ia32 *fp;
	unsigned long bytes;

	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

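	/* next_frame is a 32-bit user pointer; compat_ptr() widens it: */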
	fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);

	oprofile_add_trace(bufhead[0].return_address);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (head >= fp)
		return NULL;

	return fp;
}

static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame_ia32 *head;

	/* User process is 32-bit */
	if (!current || !test_thread_flag(TIF_IA32))
		return 0;

	head = (struct stack_frame_ia32 *) regs->bp;
	while (depth-- && head)
		head = dump_user_backtrace_32(head);

	return 1;
}

#else
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	return 0;
}
#endif /* CONFIG_COMPAT */

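/*
 * Walk one frame of a 64-bit user stack.  This assumes the profiled
 * binary keeps frame pointers; without them the sanity checks below
 * tend to end the walk early.
 */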
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
	/* Also check accessibility of one struct frame_head beyond: */
	struct stack_frame bufhead[2];
	unsigned long bytes;

	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	oprofile_add_trace(bufhead[0].return_address);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (head >= bufhead[0].next_frame)
		return NULL;

	return bufhead[0].next_frame;
}

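/*
 * oprofile backtrace callback: record up to @depth return addresses
 * for the context interrupted by the profiling NMI.
 */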
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);

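	/* Kernel-mode samples: let dump_trace() walk the kernel stack. */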
	if (!user_mode_vm(regs)) {
		unsigned long stack = kernel_stack_pointer(regs);
		if (depth)
			dump_trace(NULL, regs, (unsigned long *)stack, 0,
				   &backtrace_ops, &depth);
		return;
	}

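	/* A 32-bit task on a 64-bit kernel needs the compat unwinder: */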
	if (x86_backtrace_32(regs, depth))
		return;

	while (depth-- && head)
		head = dump_user_backtrace(head);
}