/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
static spinlock_t unwind_lock;
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);
/* Binary search for the entry whose [region_start, region_end] covers addr. */
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}

	return e;
}
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}
static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
		- ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}
void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}
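/*
 * Illustrative sketch (not part of the original file): code that ships
 * unwind data for its own text, such as a module loader, would register
 * a table with unwind_table_add() and drop it again with
 * unwind_table_remove().  The identifiers mod_name, text_base, mod_gp,
 * uw_start and uw_end below are hypothetical placeholders for the
 * object's name, load address, global pointer and the bounds of its
 * unwind section.
 */
#if 0
static struct unwind_table *mod_uw_table;

static void example_register_unwind(const char *mod_name,
				    unsigned long text_base,
				    unsigned long mod_gp,
				    void *uw_start, void *uw_end)
{
	/* sorts the entries and links the new table into unwind_tables */
	mod_uw_table = unwind_table_add(mod_name, text_base, mod_gp,
					uw_start, uw_end);
}

static void example_unregister_unwind(void)
{
	if (mod_uw_table)
		unwind_table_remove(mod_uw_table);
}
#endif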
/* Called from setup_arch to import the kernel unwind info */
int unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	spin_lock_init(&unwind_lock);

	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	       start, stop,
	       (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
			       __start___unwind[i].region_start,
			       __start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif
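/*
 * Added commentary: on a 64-bit kernel a C function pointer refers to a
 * function descriptor rather than to the first instruction, so
 * get_func_addr() picks the descriptor word that holds the real entry
 * address (index 2 above); on 32-bit the pointer already is the entry
 * address.  unwind_special() below relies on this to recognise frames
 * created by handle_interruption().
 */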
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	extern void handle_interruption(int, struct pt_regs *);
	static unsigned long *hi = (unsigned long *)&handle_interruption;

	if (pc == get_func_addr(hi)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];

		return 1;
	}

	return 0;
}
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;
		extern char _stext[], _etext[];

		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
		/* Handle some frequent special cases.... */
		{
			char symname[KSYM_NAME_LEN];
			char *modname;

			kallsyms_lookup(info->ip, NULL, NULL, &modname,
					symname);

			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

			if (strcmp(symname, "_switch_to_ret") == 0) {
				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
				dbg("_switch_to_ret @ %lx - setting "
				    "prev_sp=%lx prev_ip=%lx\n",
				    info->ip, info->prev_sp,
				    info->prev_ip);
				return;
			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
				   strcmp(symname, "syscall_exit") == 0) {
				info->prev_ip = info->prev_sp = 0;
				return;
			}
		}
#endif

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;
			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (info->prev_ip < (unsigned long)_stext ||
			 info->prev_ip > (unsigned long)_etext);

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc000) == 0x37de0000 ||
			    (insn & 0xffe00000) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x1 ? -1 << 13 : 0) |
					((insn & 0x3fff) >> 1);
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00008) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += (insn & 0x1 ? -1 << 13 : 0) |
					(((insn >> 4) & 0x3ff) << 3);
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}
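/*
 * Worked example for the prologue scan above (added for illustration,
 * not part of the original code): a prologue instruction 0x37de0080 is
 * "ldo 64(sp),sp" and matches the first pattern; its low 14 bits hold a
 * low-sign-extended immediate, so bit 0 being clear means no sign
 * extension and (0x0080 & 0x3fff) >> 1 == 64, i.e. frame_size grows by
 * 64 bytes.  The exact encodings 0x6bc23fd9 (stw rp,-20(sp)) and
 * 0x0fc212c1 (std rp,-16(sr0,sp)) instead record at which offset from
 * the previous stack pointer the return pointer was saved.
 */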
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}
void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}
int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
		/* the low bits of the IAOQ hold the privilege level, so a
		 * non-zero value means we have reached a user-space address */
	} while (!ret && !(info->ip & 3));

	return ret;
}
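/*
 * Illustrative sketch (not part of the original file): a typical
 * in-kernel backtrace of a sleeping task seeds a frame with
 * unwind_frame_init_from_blocked_task() and then steps with
 * unwind_once() until it fails, along these lines.
 */
#if 0
static void example_backtrace(struct task_struct *task)
{
	struct unwind_frame_info info;

	unwind_frame_init_from_blocked_task(&info, task);
	while (unwind_once(&info) >= 0 && info.ip) {
		if (__kernel_text_address(info.ip))
			printk(" [<%08lx>] %pS\n", info.ip, (void *)info.ip);
	}
}
#endif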
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;
	struct pt_regs r;
	unsigned long sp;

	/* initialize unwind info */
	asm volatile ("copy %%r30, %0" : "=r"(sp));
	memset(&r, 0, sizeof(struct pt_regs));
	r.iaoq[0] = (unsigned long) current_text_addr();
	r.gr[2] = (unsigned long) __builtin_return_address(0);
	r.gr[30] = sp;
	unwind_frame_init(&info, current, &r);

	/* unwind stack */
	++level;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!__kernel_text_address(info.ip)) {
			return 0;
		}
	} while (info.ip && level--);

	return info.ip;
}