/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static spinlock_t unwind_lock;
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);
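
/*
 * Binary search a single unwind table for the entry whose
 * [region_start, region_end] range covers @addr.  Relies on the
 * entries being sorted by region_start.
 */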
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
        const struct unwind_table_entry *e = NULL;
        unsigned long lo, hi, mid;

        lo = 0;
        hi = table->length - 1;

        while (lo <= hi) {
                mid = (hi - lo) / 2 + lo;
                e = &table->table[mid];
                if (addr < e->region_start)
                        hi = mid - 1;
                else if (addr > e->region_end)
                        lo = mid + 1;
                else
                        return e;
        }

        return NULL;
}
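
/*
 * Look up the unwind entry covering @addr, checking the statically
 * built kernel table first and falling back to the per-module tables
 * registered via unwind_table_add().
 */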
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
        struct unwind_table *table;
        const struct unwind_table_entry *e = NULL;

        if (addr >= kernel_unwind_table.start &&
            addr <= kernel_unwind_table.end)
                e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
        else
                list_for_each_entry(table, &unwind_tables, list) {
                        if (addr >= table->start &&
                            addr <= table->end)
                                e = find_unwind_entry_in_table(table, addr);
                        if (e)
                                break;
                }

        return e;
}
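
/*
 * Fill in an unwind_table descriptor for the entries in
 * [table_start, table_end).  Region addresses in the raw entries are
 * offsets from @base_addr; they are converted to absolute addresses
 * here, and overlapping (out of order) entries are reported.
 */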
static void
unwind_table_init(struct unwind_table *table, const char *name,
                  unsigned long base_addr, unsigned long gp,
                  void *table_start, void *table_end)
{
        struct unwind_table_entry *start = table_start;
        struct unwind_table_entry *end =
                (struct unwind_table_entry *)table_end - 1;

        table->name = name;
        table->base_addr = base_addr;
        table->gp = gp;
        table->start = base_addr + start->region_start;
        table->end = base_addr + end->region_end;
        table->table = (struct unwind_table_entry *)table_start;
        table->length = end - start + 1;
        INIT_LIST_HEAD(&table->list);

        for (; start <= end; start++) {
                if (start < end &&
                    start->region_end > (start+1)->region_start) {
                        printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
                }

                start->region_start += base_addr;
                start->region_end += base_addr;
        }
}
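
/*
 * Module unwind tables are not guaranteed to arrive sorted, so sort
 * them by region_start before use; find_unwind_entry_in_table()
 * depends on that ordering for its binary search.
 */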
static int cmp_unwind_table_entry(const void *a, const void *b)
{
        return ((const struct unwind_table_entry *)a)->region_start
                - ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
                  struct unwind_table_entry *finish)
{
        sort(start, finish - start, sizeof(struct unwind_table_entry),
             cmp_unwind_table_entry, NULL);
}
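
/*
 * Register the unwind information of a newly loaded module: sort the
 * entries, rebase them via unwind_table_init() and link the table
 * into the global list under unwind_lock.  Returns NULL on
 * allocation failure.
 */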
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
                 unsigned long gp,
                 void *start, void *end)
{
        struct unwind_table *table;
        unsigned long flags;
        struct unwind_table_entry *s = (struct unwind_table_entry *)start;
        struct unwind_table_entry *e = (struct unwind_table_entry *)end;

        unwind_table_sort(s, e);

        table = kmalloc(sizeof(struct unwind_table), GFP_USER);
        if (table == NULL)
                return NULL;
        unwind_table_init(table, name, base_addr, gp, start, end);
        spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&table->list, &unwind_tables);
        spin_unlock_irqrestore(&unwind_lock, flags);

        return table;
}
void unwind_table_remove(struct unwind_table *table)
{
        unsigned long flags;

        spin_lock_irqsave(&unwind_lock, flags);
        list_del(&table->list);
        spin_unlock_irqrestore(&unwind_lock, flags);

        kfree(table);
}
/* Called from setup_arch to import the kernel unwind info */
int unwind_init(void)
{
        long start, stop;
        register unsigned long gp __asm__ ("r27");

        start = (long)&__start___unwind[0];
        stop = (long)&__stop___unwind[0];

        spin_lock_init(&unwind_lock);

        printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
            start, stop,
            (stop - start) / sizeof(struct unwind_table_entry));

        unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
                          gp,
                          &__start___unwind[0], &__stop___unwind[0]);
#if 0
        {
                int i;
                for (i = 0; i < 10; i++)
                {
                        printk("region 0x%x-0x%x\n",
                                __start___unwind[i].region_start,
                                __start___unwind[i].region_end);
                }
        }
#endif
        return 0;
}
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif
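
/*
 * get_func_addr() maps the address of a function symbol to the
 * address of its first instruction: on 64-bit PA-RISC a function
 * pointer refers to a function descriptor, so the entry point has to
 * be fetched from within the descriptor.
 *
 * unwind_special() recognizes a frame belonging to
 * handle_interruption(); in that case the interrupted context's sp
 * and iaoq are recovered from the pt_regs block saved below the
 * handler's frame on the stack.
 */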
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
        extern void handle_interruption(int, struct pt_regs *);
        static unsigned long *hi = (unsigned long *)&handle_interruption;

        if (pc == get_func_addr(hi)) {
                struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
                dbg("Unwinding through handle_interruption()\n");
                info->prev_sp = regs->gr[30];
                info->prev_ip = regs->iaoq[0];

                return 1;
        }

        return 0;
}
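
/*
 * Compute prev_sp/prev_ip for the frame described by @info.  If an
 * unwind entry exists for info->ip, the function prologue is decoded
 * to find the frame size and the slot where rp was stored; otherwise
 * the stack is walked blindly, following candidate return pointers
 * until one lands inside the kernel text section.
 */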
static void unwind_frame_regs(struct unwind_frame_info *info)
{
        const struct unwind_table_entry *e;
        unsigned long npc;
        unsigned int insn;
        long frame_size = 0;
        int looking_for_rp, rpoffset = 0;

        e = find_unwind_entry(info->ip);
        if (e == NULL) {
                unsigned long sp;
                extern char _stext[], _etext[];

                dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
                /* Handle some frequent special cases.... */
                {
                        char symname[KSYM_NAME_LEN];
                        char *modname;

                        kallsyms_lookup(info->ip, NULL, NULL, &modname,
                                symname);

                        dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

                        if (strcmp(symname, "_switch_to_ret") == 0) {
                                info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
                                info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
                                dbg("_switch_to_ret @ %lx - setting "
                                    "prev_sp=%lx prev_ip=%lx\n",
                                    info->ip, info->prev_sp,
                                    info->prev_ip);
                                return;
                        } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
                                   strcmp(symname, "syscall_exit") == 0) {
                                info->prev_ip = info->prev_sp = 0;
                                return;
                        }
                }
#endif

                /* Since we are doing the unwinding blind, we don't know if
                   we are adjusting the stack correctly or extracting the rp
                   correctly. The rp is checked to see if it belongs to the
                   kernel text section; if not, we assume we don't have a
                   correct stack frame and we continue to unwind the stack.
                   This is not quite correct, and will fail for loadable
                   modules. */
                sp = info->sp & ~63;
                do {
                        unsigned long tmp;

                        info->prev_sp = sp - 64;
                        info->prev_ip = 0;
                        if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
                                break;
                        info->prev_ip = tmp;
                        sp = info->prev_sp;
                } while (info->prev_ip < (unsigned long)_stext ||
                         info->prev_ip > (unsigned long)_etext);

                info->rp = 0;

                dbg("analyzing func @ %lx with no unwind info, setting "
                    "prev_sp=%lx prev_ip=%lx\n", info->ip,
                    info->prev_sp, info->prev_ip);
        } else {
                dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
                    "Save_RP = %d, Millicode = %d size = %u\n",
                    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
                    e->Millicode, e->Total_frame_size);

                looking_for_rp = e->Save_RP;
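
                /*
                 * Walk the function prologue one instruction at a time,
                 * accumulating the stack adjustment until the recorded
                 * total frame size is reached, and noting the offset at
                 * which rp was saved.
                 */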
                for (npc = e->region_start;
                     (frame_size < (e->Total_frame_size << 3) ||
                      looking_for_rp) &&
                     npc < info->ip;
                     npc += 4) {

                        insn = *(unsigned int *)npc;

                        if ((insn & 0xffffc000) == 0x37de0000 ||
                            (insn & 0xffe00000) == 0x6fc00000) {
                                /* ldo X(sp), sp, or stwm X,D(sp) */
                                frame_size += (insn & 0x1 ? -1 << 13 : 0) |
                                        ((insn & 0x3fff) >> 1);
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
                        } else if ((insn & 0xffe00008) == 0x73c00008) {
                                /* std,ma X,D(sp) */
                                frame_size += (insn & 0x1 ? -1 << 13 : 0) |
                                        (((insn >> 4) & 0x3ff) << 3);
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
                        } else if (insn == 0x6bc23fd9) {
                                /* stw rp,-20(sp) */
                                rpoffset = 20;
                                looking_for_rp = 0;
                                dbg("analyzing func @ %lx, insn=stw rp,"
                                    "-20(sp) @ %lx\n", info->ip, npc);
                        } else if (insn == 0x0fc212c1) {
                                /* std rp,-16(sr0,sp) */
                                rpoffset = 16;
                                looking_for_rp = 0;
                                dbg("analyzing func @ %lx, insn=std rp,"
                                    "-16(sp) @ %lx\n", info->ip, npc);
                        }
                }

                if (!unwind_special(info, e->region_start, frame_size)) {
                        info->prev_sp = info->sp - frame_size;
                        if (e->Millicode)
                                info->rp = info->r31;
                        else if (rpoffset)
                                info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
                        info->prev_ip = info->rp;
                        info->rp = 0;
                }

                dbg("analyzing func @ %lx, setting prev_sp=%lx "
                    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
                    info->prev_ip, npc);
        }
}
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
                       struct pt_regs *regs)
{
        memset(info, 0, sizeof(struct unwind_frame_info));
        info->t = t;
        info->sp = regs->gr[30];
        info->ip = regs->iaoq[0];
        info->rp = regs->gr[2];
        info->r31 = regs->gr[31];

        dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
            t ? (int)t->pid : -1, info->sp, info->ip);
}
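
/*
 * Start an unwind of a task that is not currently running: a
 * temporary pt_regs copy is seeded with the kernel stack pointer and
 * kernel pc saved at context switch time (ksp/kpc).
 */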
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
        struct pt_regs *r = &t->thread.regs;
        struct pt_regs *r2;

        r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
        if (!r2)
                return;
        *r2 = *r;
        r2->gr[30] = r->ksp;
        r2->iaoq[0] = r->kpc;
        unwind_frame_init(info, t, r2);
        kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
        unwind_frame_init(info, current, regs);
}
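
/*
 * Step the unwinder back by one frame.  Returns 0 if a caller frame
 * was found and the frame info now describes it, or -1 when no
 * further frame could be determined.
 *
 * A minimal usage sketch (caller context and printk format are
 * illustrative only):
 *
 *      struct unwind_frame_info info;
 *
 *      unwind_frame_init_from_blocked_task(&info, task);
 *      while (unwind_once(&info) >= 0 && info.ip)
 *              printk(KERN_DEBUG "frame: %pS\n", (void *)info.ip);
 */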
int unwind_once(struct unwind_frame_info *next_frame)
{
        unwind_frame_regs(next_frame);

        if (next_frame->prev_sp == 0 ||
            next_frame->prev_ip == 0)
                return -1;

        next_frame->sp = next_frame->prev_sp;
        next_frame->ip = next_frame->prev_ip;
        next_frame->prev_sp = 0;
        next_frame->prev_ip = 0;

        dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
            next_frame->t ? (int)next_frame->t->pid : -1,
            next_frame->sp, next_frame->ip);

        return 0;
}
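
/*
 * Unwind until the instruction pointer leaves kernel space: the two
 * low bits of the IAOQ encode the privilege level, so a non-zero
 * (ip & 3) marks a userspace return address.
 */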
int unwind_to_user(struct unwind_frame_info *info)
{
        int ret;

        do {
                ret = unwind_once(info);
        } while (!ret && !(info->ip & 3));

        return ret;
}
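
/*
 * Return the kernel text address @level call frames up from the
 * caller of return_address(), or 0 if the unwind fails or leaves the
 * kernel text before reaching that depth.
 */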
unsigned long return_address(unsigned int level)
{
        struct unwind_frame_info info;
        struct pt_regs r;
        unsigned long sp;

        /* initialize unwind info */
        asm volatile ("copy %%r30, %0" : "=r"(sp));
        memset(&r, 0, sizeof(struct pt_regs));
        r.iaoq[0] = (unsigned long) current_text_addr();
        r.gr[2] = (unsigned long) __builtin_return_address(0);
        r.gr[30] = sp;
        unwind_frame_init(&info, current, &r);

        /* unwind stack */
        ++level;
        do {
                if (unwind_once(&info) < 0 || info.ip == 0)
                        return 0;
                if (!__kernel_text_address(info.ip)) {
                        return 0;
                }
        } while (info.ip && level--);

        return info.ip;
}