/*
 * intr.c
 *
 *  Created on: Aug 26, 2008
 *      Author: misc
 */
#include <inttypes.h>
#include <aros/kernel.h>
#include <aros/libcall.h>
#include <asm/mpc5200b.h>
#include <stddef.h>
#include <string.h>

#include <proto/exec.h>
#include <proto/kernel.h>

#include "kernel_intern.h"
#include "syscall.h"
extern void __tmpl_start();
extern uint32_t __tmpl_addr_lo;
extern uint32_t __tmpl_addr_hi;
extern uint32_t __tmpl_irq_num;
extern uint32_t __tmpl_length;

static void init_interrupt(uint8_t num, void *handler);
void __attribute__((noreturn)) program_handler(regs_t *ctx, uint8_t exception, void *self);
void __attribute__((noreturn)) generic_handler(regs_t *ctx, uint8_t exception, void *self);
void __attribute__((noreturn)) decrementer_handler(regs_t *ctx, uint8_t exception, void *self);
static void flush_cache(char *start, char *end);
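
/*
 * The three tiny entry points below enter the supervisor through the sc
 * instruction; the syscall number (SC_CLI, SC_STI, SC_ISSUPERSTATE from
 * syscall.h) is passed in %r3 and dispatched by the syscall_handler that
 * intr_init() below installs for vector 12.
 */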
AROS_LH0I(void, KrnCli,
          struct KernelBase *, KernelBase, 9, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_CLI):"memory","r3");

    AROS_LIBFUNC_EXIT
}
AROS_LH0I(void, KrnSti,
          struct KernelBase *, KernelBase, 10, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_STI):"memory","r3");

    AROS_LIBFUNC_EXIT
}
/* Returns non-zero when called from supervisor state, so the return type is int */
AROS_LH0I(int, KrnIsSuper,
          struct KernelBase *, KernelBase, 12, Kernel)
{
    AROS_LIBFUNC_INIT

    register int retval asm("r3");

    asm volatile("sc":"=r"(retval):"0"(SC_ISSUPERSTATE):"memory");

    return retval;

    AROS_LIBFUNC_EXIT
}
AROS_LH4(void *, KrnAddExceptionHandler,
         AROS_LHA(uint8_t, irq, D0),
         AROS_LHA(void *, handler, A0),
         AROS_LHA(void *, handlerData, A1),
         AROS_LHA(void *, handlerData2, A2),
         struct KernelBase *, KernelBase, 7, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct ExceptNode *handle = NULL;

    D(bug("[KRN] KrnAddExceptionHandler(%02x, %012p, %012p, %012p):\n", irq, handler, handlerData, handlerData2));

    if (irq < 21)
    {
        /* Go to supervisor mode */
        goSuper();

        handle = Allocate(KernelBase->kb_SupervisorMem, sizeof(struct ExceptNode));
        D(bug("[KRN] handle=%012p\n", handle));

        if (handle)
        {
            handle->in_Handler = handler;
            handle->in_HandlerData = handlerData;
            handle->in_HandlerData2 = handlerData2;
            handle->in_type = it_exception;
            handle->in_nr = irq;

            Disable();
            ADDHEAD(&KernelBase->kb_Exceptions[irq], &handle->in_Node);
            Enable();
        }

        goUser();
    }

    return handle;

    AROS_LIBFUNC_EXIT
}
AROS_LH1(void, KrnRemExceptionHandler,
         AROS_LHA(void *, handle, A0),
         struct KernelBase *, KernelBase, 8, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct ExceptNode *h = handle;

    if (h && (h->in_type == it_exception))
    {
        goSuper();

        Disable();
        REMOVE(h);
        Enable();

        /* The node was allocated as a struct ExceptNode, so free that size */
        Deallocate(KernelBase->kb_SupervisorMem, h, sizeof(struct ExceptNode));

        goUser();
    }

    AROS_LIBFUNC_EXIT
}
AROS_LH4(void *, KrnAddIRQHandler,
         AROS_LHA(uint8_t, irq, D0),
         AROS_LHA(void *, handler, A0),
         AROS_LHA(void *, handlerData, A1),
         AROS_LHA(void *, handlerData2, A2),
         struct KernelBase *, KernelBase, 7, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct IntrNode *handle = NULL;

    D(bug("[KRN] KrnAddIRQHandler(%02x, %012p, %012p, %012p):\n", irq, handler, handlerData, handlerData2));

    if (irq < 63)
    {
        /* Go to supervisor mode */
        goSuper();

        handle = Allocate(KernelBase->kb_SupervisorMem, sizeof(struct IntrNode));
        D(bug("[KRN] handle=%012p\n", handle));

        if (handle)
        {
            handle->in_Handler = handler;
            handle->in_HandlerData = handlerData;
            handle->in_HandlerData2 = handlerData2;
            handle->in_type = it_interrupt;
            handle->in_nr = irq;

            Disable();
            ADDHEAD(&KernelBase->kb_Interrupts[irq], &handle->in_Node);
            ictl_enable_irq(irq);
            Enable();
        }

        goUser();
    }

    return handle;

    AROS_LIBFUNC_EXIT
}
AROS_LH1(void, KrnRemIRQHandler,
         AROS_LHA(void *, handle, A0),
         struct KernelBase *, KernelBase, 8, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct IntrNode *h = handle;

    if (h && (h->in_type == it_interrupt))
    {
        /* Read the IRQ number only after the NULL check above */
        uint8_t irq = h->in_nr;

        goSuper();

        Disable();
        REMOVE(h);
        if (IsListEmpty(&KernelBase->kb_Interrupts[irq]))
            ictl_disable_irq(irq);
        Enable();

        Deallocate(KernelBase->kb_SupervisorMem, h, sizeof(struct IntrNode));

        goUser();
    }

    AROS_LIBFUNC_EXIT
}
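
/*
 * Usage sketch (illustrative only; the names below are hypothetical, not part
 * of this file): a driver registers a handler and keeps the returned cookie
 * so it can unregister later.
 *
 *     static void uart_irq(void *unitdata, void *unused)
 *     {
 *         ... acknowledge and service the device ...
 *     }
 *
 *     void *h = KrnAddIRQHandler(UART_IRQ_NUM, uart_irq, unit, NULL);
 *     ...
 *     KrnRemIRQHandler(h);
 */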
/*
 * G2 core and exceptions.
 *
 * The MPC5200B CPU, just like most members of the PowerPC family, has two
 * fixed locations for exception handlers: 0x0000_xxxx and 0xFFFF_xxxx. When an
 * exception occurs, the CPU calculates the entry address by shifting the
 * exception number eight bits left, adds the base location, and begins
 * executing the code at the calculated address.
 *
 * For AROS this means there are only 256 bytes (64 instructions) available for
 * an exception entry. Therefore, just like all PowerPC operating systems, AROS
 * performs only a quick initialization there: it saves a few registers, sets
 * the exception number, determines the handler address and then jumps to a
 * common trampoline code. There, the rest of the CPU context is saved, the MMU
 * is activated, and the handler routine written in C is called.
 *
 * Leaving the exception is performed through the core_LeaveInterrupt() call,
 * which takes the CPU context as a parameter.
 *
 * About the MMU.
 *
 * There is some trouble related to the MMU, the location of the exception
 * handler and accessing data from there. The PPC executes exception handlers
 * in real mode, which means that MMU translations are disabled completely.
 * Therefore, going back to the MMU-enabled state is performed in two steps:
 * 1. The MMU for DATA is turned on very early, because otherwise the exception
 *    handler wouldn't be able to access the supervisor stack.
 * 2. The MMU for CODE is turned on as late as possible, when the C code is
 *    called. The call is performed through the rfi instruction, which restores
 *    the old contents of the MSR register (actually prepared by the asm part
 *    of the exception handler), which in turn enables the MMU for CODE.
 */
extern uint32_t __vector_imiss;
extern uint32_t __vector_dmiss;
extern uint32_t __vector_dmissw;
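
/*
 * Exception number n corresponds to the vector at address n * 0x100, so the
 * handlers installed below land at 0x0100 (system reset) through 0x1400 (SMI).
 */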
void intr_init()
{
    D(bug("[KRN] Initializing exception handlers\n"));

    init_interrupt( 1, generic_handler);     /* RESET */
    init_interrupt( 2, generic_handler);     /* Machine check */
    init_interrupt( 3, mmu_handler);         /* DSI */
    init_interrupt( 4, mmu_handler);         /* ISI */
    init_interrupt( 5, ictl_handler);        /* External Intr */
    init_interrupt( 6, generic_handler);     /* Alignment */
    init_interrupt( 7, program_handler);     /* Program */
    init_interrupt( 8, generic_handler);     /* Floating point unavailable */
    init_interrupt( 9, decrementer_handler); /* Decrementer */
    init_interrupt(10, generic_handler);     /* Critical exception */
    init_interrupt(12, syscall_handler);     /* Syscall */
    init_interrupt(13, generic_handler);     /* Trace */
    init_interrupt(16, generic_handler);     /* Instruction translation miss */
    init_interrupt(17, generic_handler);     /* Data load translation miss */
    init_interrupt(18, generic_handler);     /* Data store translation miss */
    init_interrupt(19, generic_handler);     /* Instruction address breakpoint */
    init_interrupt(20, ictl_handler);        /* SMI */
}
/*
 * Initializer of an exception handler. It copies the template code into the
 * proper location and adjusts two exception-dependent elements in the code:
 * the instruction which loads the exception number, "li %r4,exception_number",
 * and the two instructions which load the address of a handler,
 * "lis %r5,handler_address@ha; la %r5,handler_address@l(%r5)".
 *
 * Once ready, the data cache has to be flushed back into memory and the
 * instruction cache has to be invalidated.
 */
static void init_interrupt(uint8_t num, void *handler)
{
    if (num > 0 && num < 0x2f)
    {
        intptr_t target = num << 8;

        if (num == 16)
            memcpy((void*)target, &__vector_imiss, 256);
        else if (num == 17)
            memcpy((void*)target, &__vector_dmiss, 256);
        else if (num == 18)
            memcpy((void*)target, &__vector_dmissw, 256);
        else
        {
            memcpy((void*)target, __tmpl_start, __tmpl_length);

            /* Fix the exception number */
            *(uint16_t *)(target + __tmpl_irq_num) = num;

            /* Fix the handler address */
            *(uint16_t *)(target + __tmpl_addr_lo) = (intptr_t)handler & 0x0000ffff;
            *(uint16_t *)(target + __tmpl_addr_hi) = (intptr_t)handler >> 16;

            /*
             * Adjustment of the lower halfword of the address is done through
             * the "la" instruction, which happens to be the same as addi:
             *
             *   "la %reg1, offset(%reg2)" <=> "addi %reg1, %reg2, offset"
             *
             * If the offset is bigger than 32KB (thus seen by addi as a
             * negative number), increase the upper halfword by one.
             */
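
            /*
             * Worked example (hypothetical address): for handler = 0x00048000,
             * the stored low halfword 0x8000 is sign-extended by addi to
             * -0x8000, so the high halfword is bumped from 0x0004 to 0x0005:
             * 0x00050000 - 0x8000 = 0x00048000, as intended.
             */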
            if ((intptr_t)handler & 0x00008000)
                (*(uint16_t *)(target + __tmpl_addr_hi))++;
        }

        /* Flush the cache */
        flush_cache((char*)target, (char*)target + 0xff);
    }
}
/* Tiny routine to flush caches for a region of memory */
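/*
 * The sequence below is the standard PPC self-modifying-code flush: dcbst
 * pushes each dirty data cache block out to memory, sync waits until those
 * stores are performed, icbi invalidates the now-stale instruction cache
 * blocks, and the final sync/isync make sure no stale instructions remain
 * prefetched before the new vector code is executed.
 */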
static void flush_cache(char *start, char *end)
{
    start = (char*)((unsigned long)start & 0xffffffe0);
    end = (char*)((unsigned long)end & 0xffffffe0);
    char *ptr;

    /* Both bounds are aligned down, so iterate inclusively in order not to
       miss the cache line containing 'end' */
    for (ptr = start; ptr <= end; ptr += 32)
    {
        asm volatile("dcbst 0,%0"::"r"(ptr));
    }
    asm volatile("sync");

    for (ptr = start; ptr <= end; ptr += 32)
    {
        asm volatile("icbi 0,%0"::"r"(ptr));
    }

    asm volatile("sync; isync;");
}
/* FPU handler */
void __attribute__((noreturn)) fpu_handler(context_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();

    if (KernelBase)
    {
        if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
        {
            struct ExceptNode *in, *intemp;

            ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
            {
                /*
                 * Call every handler tied to this exception.
                 */
                if (in->in_Handler)
                    in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
            }
        }
    }

    core_LeaveInterrupt(ctx);
}
extern uint64_t tbu1;
extern uint64_t tbu2;
extern uint64_t last_calc;
extern uint64_t idle_time;
extern uint32_t cpu_usage;
extern struct Task *idle_task;
/* Decrementer handler */
void __attribute__((noreturn)) decrementer_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();
//    static uint32_t cnt = 0;
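
    /* Reload the decrementer: with the 33 MHz time base used throughout this
       file, this gives a 100 Hz scheduling tick */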
384 asm volatile("mtdec %0"::"r"(33000000/100));
386 if (KernelBase)
388 if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
390 struct ExceptNode *in, *intemp;
392 ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
395 * call every handler tied to this exception.
397 if (in->in_Handler)
398 in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
403 if (SysBase && SysBase->Elapsed)
405 if (--SysBase->Elapsed == 0)
407 SysBase->SysFlags |= 0x2000;
408 SysBase->AttnResched |= 0x80;
412 /* CPU usage meter. it should not be here, actually */
413 uint64_t current = mftbu();
414 if (current - last_calc > 33000000)
416 uint32_t total_time = current - last_calc;
418 if (SysBase->ThisTask == idle_task)
420 tbu2 = mftbu();
421 idle_time += tbu2 - tbu1;
422 tbu1 = tbu2;
425 if (total_time < idle_time)
426 total_time=idle_time;
428 cpu_usage = 1000 - ((uint32_t)(idle_time))/(total_time /1000);
430 D(bug("[KRN] CPU usage: %3d.%d\n", cpu_usage / 10, cpu_usage % 10));
432 last_calc = current;
433 idle_time = 0;
436 core_ExitInterrupt(ctx);
/* Program exception handler: emulates a few instructions and reports traps */
void __attribute__((noreturn)) program_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();

    uint32_t insn = *(uint32_t *)ctx->srr0;
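
    /*
     * The kernel keeps KernelBase and SysBase in SPRG4/SPRG5. Reading those
     * SPRs is a privileged operation, so an mfspr attempted from user mode
     * lands here as a program exception and is emulated, letting user code
     * fetch both bases with a single instruction.
     */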
    if ((insn & 0xfc1fffff) == 0x7c1442a6)      /* mfspr rD,SPRG4 */
    {
        ctx->gpr[(insn >> 21) & 0x1f] = (uint32_t)getKernelBase();
        ctx->srr0 += 4;
        core_LeaveInterrupt(ctx);
    }
    else if ((insn & 0xfc1fffff) == 0x7c1542a6) /* mfspr rD,SPRG5 */
    {
        ctx->gpr[(insn >> 21) & 0x1f] = (uint32_t)getSysBase();
        ctx->srr0 += 4;
        core_LeaveInterrupt(ctx);
    }
    else if (insn == 0x7fe00008)                /* unconditional trap (tw 31,0,0) */
    {
        D(bug("[KRN] trap @ %08x (r3=%08x)\n", ctx->srr0, ctx->gpr[3]));

        if (SysBase)
        {
            struct Task *t = FindTask(NULL);
            D(bug("[KRN] %s %p (%s)\n", t->tc_Node.ln_Type == NT_TASK ? "Task":"Process", t, t->tc_Node.ln_Name ? t->tc_Node.ln_Name : "--unknown--"));
        }

        D(bug("[KRN] SRR0=%08x, SRR1=%08x\n", ctx->srr0, ctx->srr1));
        D(bug("[KRN] CTR=%08x LR=%08x XER=%08x CCR=%08x\n", ctx->ctr, ctx->lr, ctx->xer, ctx->ccr));
        D(bug("[KRN] DAR=%08x DSISR=%08x\n", ctx->dar, ctx->dsisr));
        D(bug("[KRN] HASH1=%08x HASH2=%08x IMISS=%08x DMISS=%08x ICMP=%08x DCMP=%08x\n",
              rdspr(978), rdspr(979), rdspr(980), rdspr(976), rdspr(981), rdspr(977)));
        D(bug("[KRN] SPRG0=%08x SPRG1=%08x SPRG2=%08x SPRG3=%08x SPRG4=%08x SPRG5=%08x\n",
              rdspr(SPRG0), rdspr(SPRG1), rdspr(SPRG2), rdspr(SPRG3), rdspr(SPRG4), rdspr(SPRG5)));
        D(bug("[KRN] GPR00=%08x GPR01=%08x GPR02=%08x GPR03=%08x\n",
              ctx->gpr[0], ctx->gpr[1], ctx->gpr[2], ctx->gpr[3]));
        D(bug("[KRN] GPR04=%08x GPR05=%08x GPR06=%08x GPR07=%08x\n",
              ctx->gpr[4], ctx->gpr[5], ctx->gpr[6], ctx->gpr[7]));
        D(bug("[KRN] GPR08=%08x GPR09=%08x GPR10=%08x GPR11=%08x\n",
              ctx->gpr[8], ctx->gpr[9], ctx->gpr[10], ctx->gpr[11]));
        D(bug("[KRN] GPR12=%08x GPR13=%08x GPR14=%08x GPR15=%08x\n",
              ctx->gpr[12], ctx->gpr[13], ctx->gpr[14], ctx->gpr[15]));
        D(bug("[KRN] GPR16=%08x GPR17=%08x GPR18=%08x GPR19=%08x\n",
              ctx->gpr[16], ctx->gpr[17], ctx->gpr[18], ctx->gpr[19]));
        D(bug("[KRN] GPR20=%08x GPR21=%08x GPR22=%08x GPR23=%08x\n",
              ctx->gpr[20], ctx->gpr[21], ctx->gpr[22], ctx->gpr[23]));
        D(bug("[KRN] GPR24=%08x GPR25=%08x GPR26=%08x GPR27=%08x\n",
              ctx->gpr[24], ctx->gpr[25], ctx->gpr[26], ctx->gpr[27]));
        D(bug("[KRN] GPR28=%08x GPR29=%08x GPR30=%08x GPR31=%08x\n",
              ctx->gpr[28], ctx->gpr[29], ctx->gpr[30], ctx->gpr[31]));

        ctx->srr0 += 4;
        core_LeaveInterrupt(ctx);
    }
    else
        generic_handler(ctx, exception, self);
}
/* Generic boring handler */
void __attribute__((noreturn)) generic_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();
    int handled = 0;

    if (KernelBase)
    {
        if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
        {
            struct ExceptNode *in, *intemp;

            ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
            {
                /*
                 * Call every handler tied to this exception. If any of them
                 * returns a non-zero value, the exception is considered
                 * handled.
                 *
                 * If no handler returns non-zero, or there are no handlers at
                 * all, this generic handler stops the CPU.
                 */
                if (in->in_Handler)
                    handled |= in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
            }
        }
    }

    D(bug("[KRN] Exception %d handler. Context @ %p, SysBase @ %p, KernelBase @ %p\n", exception, ctx, SysBase, KernelBase));
    if (SysBase)
    {
        struct Task *t = FindTask(NULL);
        D(bug("[KRN] %s %p (%s)\n", t->tc_Node.ln_Type == NT_TASK ? "Task":"Process", t, t->tc_Node.ln_Name ? t->tc_Node.ln_Name : "--unknown--"));
    }

    D(bug("[KRN] SRR0=%08x, SRR1=%08x\n", ctx->srr0, ctx->srr1));
    D(bug("[KRN] CTR=%08x LR=%08x XER=%08x CCR=%08x\n", ctx->ctr, ctx->lr, ctx->xer, ctx->ccr));
    D(bug("[KRN] DAR=%08x DSISR=%08x\n", ctx->dar, ctx->dsisr));
    D(bug("[KRN] HASH1=%08x HASH2=%08x IMISS=%08x DMISS=%08x ICMP=%08x DCMP=%08x\n",
          rdspr(978), rdspr(979), rdspr(980), rdspr(976), rdspr(981), rdspr(977)));
    D(bug("[KRN] SPRG0=%08x SPRG1=%08x SPRG2=%08x SPRG3=%08x SPRG4=%08x SPRG5=%08x\n",
          rdspr(SPRG0), rdspr(SPRG1), rdspr(SPRG2), rdspr(SPRG3), rdspr(SPRG4), rdspr(SPRG5)));
    D(bug("[KRN] GPR00=%08x GPR01=%08x GPR02=%08x GPR03=%08x\n",
          ctx->gpr[0], ctx->gpr[1], ctx->gpr[2], ctx->gpr[3]));
    D(bug("[KRN] GPR04=%08x GPR05=%08x GPR06=%08x GPR07=%08x\n",
          ctx->gpr[4], ctx->gpr[5], ctx->gpr[6], ctx->gpr[7]));
    D(bug("[KRN] GPR08=%08x GPR09=%08x GPR10=%08x GPR11=%08x\n",
          ctx->gpr[8], ctx->gpr[9], ctx->gpr[10], ctx->gpr[11]));
    D(bug("[KRN] GPR12=%08x GPR13=%08x GPR14=%08x GPR15=%08x\n",
          ctx->gpr[12], ctx->gpr[13], ctx->gpr[14], ctx->gpr[15]));
    D(bug("[KRN] GPR16=%08x GPR17=%08x GPR18=%08x GPR19=%08x\n",
          ctx->gpr[16], ctx->gpr[17], ctx->gpr[18], ctx->gpr[19]));
    D(bug("[KRN] GPR20=%08x GPR21=%08x GPR22=%08x GPR23=%08x\n",
          ctx->gpr[20], ctx->gpr[21], ctx->gpr[22], ctx->gpr[23]));
    D(bug("[KRN] GPR24=%08x GPR25=%08x GPR26=%08x GPR27=%08x\n",
          ctx->gpr[24], ctx->gpr[25], ctx->gpr[26], ctx->gpr[27]));
    D(bug("[KRN] GPR28=%08x GPR29=%08x GPR30=%08x GPR31=%08x\n",
          ctx->gpr[28], ctx->gpr[29], ctx->gpr[30], ctx->gpr[31]));

    D(bug("[KRN] Instruction dump:\n"));
    int i;
    ULONG *p = (ULONG*)ctx->srr0;
    for (i = 0; i < 8; i++)
    {
        D(bug("[KRN] %08x: %08x\n", &p[i], p[i]));
    }

    if (!handled)
    {
        D(bug("[KRN] **UNHANDLED EXCEPTION** stopping here...\n"));
        while(1)
        {
            wrmsr(rdmsr() | MSR_POW);
        }
    }

    core_LeaveInterrupt(ctx);
}
/*
 * Template code for an exception handler. Packed into a static void function
 * in order to make the assembler constraints usable.
 */
static void __attribute__((used)) __exception_template()
{
    asm volatile(".globl __tmpl_start; .type __tmpl_start,@function\n"
        "__tmpl_start:           \n"
        "   mtsprg1 %%r3         \n"    /* save %r3 */
        "   mfcr %%r3            \n"    /* copy CR to %r3 */
        "   mtsprg3 %%r3         \n"    /* save %r3 */

        "   mfmsr %%r3           \n"
        "   ori %%r3,%%r3,%2     \n"    /* Enable address translation for data */
        "   mtmsr %%r3           \n"
        "   sync; isync          \n"

        "   mfsrr1 %%r3          \n"    /* srr1 (the previous MSR) into %r3 */
        "   andi. %%r3,%%r3,%0   \n"    /* Was the PR bit set in MSR already? */
        "   beq- 1f              \n"    /* No, we were in supervisor mode */

        "   mfsprg0 %%r3         \n"    /* User mode case: SSP into %r3 */
        "   b 2f                 \n"
        "1: mr %%r3,%%r1         \n"    /* Supervisor case: use the current stack */
        "2: addi %%r3,%%r3,%1    \n"
        ::"i"(MSR_PR), "i"(-sizeof(context_t)), "i"(MSR_DS));

    asm volatile(
        "   stw %%r0, %[gpr0](%%r3)  \n" /* Store a bunch of registers already. I could  */
        "   stw %%r1, %[gpr1](%%r3)  \n" /* do it in the common trampoline code, but it  */
        "   stw %%r2, %[gpr2](%%r3)  \n" /* is much more sexy to do it here - this code  */
        "   mfsprg1 %%r0             \n" /* occupies, in theory, ZERO bytes of memory,   */
        "   stw %%r4, %[gpr4](%%r3)  \n" /* because the exception vector is 256 bytes    */
        "   stw %%r0, %[gpr3](%%r3)  \n" /* long and shouldn't be used for anything else */
        "   stw %%r5, %[gpr5](%%r3)  \n" /* than the exception handler anyway ;)         */
        "   mfsprg3 %%r2             \n"
        "   mfsrr0 %%r0              \n"
        "   mfsrr1 %%r1              \n"
        "__addr_hi: lis %%r5, 0xdeadbeef@ha     \n" /* Load the address of a generic handler */
        "__addr_lo: la %%r5, 0xdeadbeef@l(%%r5) \n" /* yes, load immediate sucks. Think about 64-bit PPC ;) */
        "__irq_num: li %%r4, 0x5a5a             \n" /* Load the exception number */
        "   stw %%r2,%[ccr](%%r3)    \n"
        "   stw %%r0,%[srr0](%%r3)   \n"
        "   stw %%r1,%[srr1](%%r3)   \n"
        "   mfctr %%r0               \n"
        "   mflr %%r1                \n"
        "   mfxer %%r2               \n"
        "   stw %%r0,%[ctr](%%r3)    \n"
        "   stw %%r1,%[lr](%%r3)     \n"
        "   stw %%r2,%[xer](%%r3)    \n"
        ::[gpr0]"i"(offsetof(regs_t, gpr[0])),
          [gpr1]"i"(offsetof(regs_t, gpr[1])),
          [gpr2]"i"(offsetof(regs_t, gpr[2])),
          [gpr3]"i"(offsetof(regs_t, gpr[3])),
          [gpr4]"i"(offsetof(regs_t, gpr[4])),
          [gpr5]"i"(offsetof(regs_t, gpr[5])),
          [ccr]"i"(offsetof(regs_t, ccr)),
          [srr0]"i"(offsetof(regs_t, srr0)),
          [srr1]"i"(offsetof(regs_t, srr1)),
          [ctr]"i"(offsetof(regs_t, ctr)),
          [lr]"i"(offsetof(regs_t, lr)),
          [xer]"i"(offsetof(regs_t, xer)));

    /*
     * Registers %r0 to %r5 are now saved together with the CPU state. Go to
     * the trampoline code, which will take care of the rest. Adjust the stack
     * frame pointer now, or else it will be destroyed later by the C code.
     */
    asm volatile("addi %r1,%r3,-16");

    /*
     * Go to the trampoline code. Use a long call within the whole 4GB address
     * space in order to avoid any trouble in the future. Moreover, use the
     * PHYSICAL address, since at this stage the MMU for code is still not
     * running! If one wanted to use the MMU at this stage already, the region
     * 0x00000000-0x00003000 would have to be made *EXECUTABLE* :))
     */
    asm volatile("lis %r2,(__EXCEPTION_Trampoline - " STR(KERNEL_VIRT_BASE) " + " STR(KERNEL_PHYS_BASE) ")@ha;"
                 "la %r2,(__EXCEPTION_Trampoline - " STR(KERNEL_VIRT_BASE) " + " STR(KERNEL_PHYS_BASE) ")@l(%r2); mtctr %r2;");

    /* Jump to the trampoline code */
    asm volatile("bctr;");

    /*
     * A few variables: the length of the code above and the offsets used to
     * fix up the exception number and handler address.
     */
    asm volatile("__tmpl_length: .long . - __tmpl_start\n");
    asm volatile("__tmpl_addr_lo: .long 2 + __addr_lo - __tmpl_start\n");
    asm volatile("__tmpl_addr_hi: .long 2 + __addr_hi - __tmpl_start\n");
    asm volatile("__tmpl_irq_num: .long 2 + __irq_num - __tmpl_start\n");
}
/*
 * The trampoline code is boring. It stores the rest of the CPU context and
 * prepares everything for execution of the C code.
 *
 * The only interesting part is the jump to the C routine, which is done
 * through the rfi instruction (return from interrupt). It is done this way
 * because rfi can enable the MMU for code and jump the CPU to the desired
 * address within a single instruction.
 */
static void __attribute__((used)) __EXCEPTION_Trampoline_template()
{
    asm volatile(".section .aros.init,\"ax\"\n\t.align 5\n\t.globl __EXCEPTION_Trampoline\n\t.type __EXCEPTION_Trampoline,@function\n"
        "__EXCEPTION_Trampoline:    \n\t"
        "stw %%r6,%[gpr6](%%r3)     \n\t"
        "stw %%r7,%[gpr7](%%r3)     \n\t"
        "stw %%r8,%[gpr8](%%r3)     \n\t"
        "stw %%r9,%[gpr9](%%r3)     \n\t"
        "stw %%r10,%[gpr10](%%r3)   \n\t"
        "stw %%r11,%[gpr11](%%r3)   \n\t"
        "stw %%r12,%[gpr12](%%r3)   \n\t"
        "stw %%r13,%[gpr13](%%r3)   \n\t"
        "stw %%r14,%[gpr14](%%r3)   \n\t"
        "stw %%r15,%[gpr15](%%r3)   \n\t"
        "stw %%r16,%[gpr16](%%r3)   \n\t"
        "stw %%r17,%[gpr17](%%r3)   \n\t"
        "stw %%r18,%[gpr18](%%r3)   \n\t"
        "stw %%r19,%[gpr19](%%r3)   \n\t"
        "stw %%r20,%[gpr20](%%r3)   \n\t"
        "stw %%r21,%[gpr21](%%r3)   \n\t"
        "stw %%r22,%[gpr22](%%r3)   \n\t"
        "stw %%r23,%[gpr23](%%r3)   \n\t"
        "stw %%r24,%[gpr24](%%r3)   \n\t"
        "stw %%r25,%[gpr25](%%r3)   \n\t"
        "stw %%r26,%[gpr26](%%r3)   \n\t"
        "stw %%r27,%[gpr27](%%r3)   \n\t"
        "stw %%r28,%[gpr28](%%r3)   \n\t"
        "stw %%r29,%[gpr29](%%r3)   \n\t"
        "stw %%r30,%[gpr30](%%r3)   \n\t"
        "stw %%r31,%[gpr31](%%r3)   \n\t"
        ::
        [gpr6]"i"(offsetof(regs_t, gpr[6])),
        [gpr7]"i"(offsetof(regs_t, gpr[7])),
        [gpr8]"i"(offsetof(regs_t, gpr[8])),
        [gpr9]"i"(offsetof(regs_t, gpr[9])),
        [gpr10]"i"(offsetof(regs_t, gpr[10])),
        [gpr11]"i"(offsetof(regs_t, gpr[11])),
        [gpr12]"i"(offsetof(regs_t, gpr[12])),
        [gpr13]"i"(offsetof(regs_t, gpr[13])),
        [gpr14]"i"(offsetof(regs_t, gpr[14])),
        [gpr15]"i"(offsetof(regs_t, gpr[15])),
        [gpr16]"i"(offsetof(regs_t, gpr[16])),
        [gpr17]"i"(offsetof(regs_t, gpr[17])),
        [gpr18]"i"(offsetof(regs_t, gpr[18])),
        [gpr19]"i"(offsetof(regs_t, gpr[19])),
        [gpr20]"i"(offsetof(regs_t, gpr[20])),
        [gpr21]"i"(offsetof(regs_t, gpr[21])),
        [gpr22]"i"(offsetof(regs_t, gpr[22])),
        [gpr23]"i"(offsetof(regs_t, gpr[23])),
        [gpr24]"i"(offsetof(regs_t, gpr[24])),
        [gpr25]"i"(offsetof(regs_t, gpr[25])),
        [gpr26]"i"(offsetof(regs_t, gpr[26])),
        [gpr27]"i"(offsetof(regs_t, gpr[27])),
        [gpr28]"i"(offsetof(regs_t, gpr[28])),
        [gpr29]"i"(offsetof(regs_t, gpr[29])),
        [gpr30]"i"(offsetof(regs_t, gpr[30])),
        [gpr31]"i"(offsetof(regs_t, gpr[31]))
    );
//    asm volatile(
//        "mfmsr %%r0                 \n\t"
//        "ori %%r0,%%r0, %[msrval]@l \n\t"
//        "mtmsr %%r0; isync          \n\t"
//        "mffs %%r0                  \n\t"
//        "stw %%r0,%[fpscr](%%r3)    \n\t"
//        "stfd %%f0,%[fr0](%%r3)     \n\t"
//        "stfd %%f1,%[fr1](%%r3)     \n\t"
//        "stfd %%f2,%[fr2](%%r3)     \n\t"
//        "stfd %%f3,%[fr3](%%r3)     \n\t"
//        "stfd %%f4,%[fr4](%%r3)     \n\t"
//        "stfd %%f5,%[fr5](%%r3)     \n\t"
//        "stfd %%f6,%[fr6](%%r3)     \n\t"
//        "stfd %%f7,%[fr7](%%r3)     \n\t"
//        "stfd %%f8,%[fr8](%%r3)     \n\t"
//        "stfd %%f9,%[fr9](%%r3)     \n\t"
//        "stfd %%f10,%[fr10](%%r3)   \n\t"
//        "stfd %%f11,%[fr11](%%r3)   \n\t"
//        "stfd %%f12,%[fr12](%%r3)   \n\t"
//        "stfd %%f13,%[fr13](%%r3)   \n\t"
//        "stfd %%f14,%[fr14](%%r3)   \n\t"
//        "stfd %%f15,%[fr15](%%r3)   \n\t"
//        ::
//        [fpscr]"i"(offsetof(context_t, fpu.fpscr)),
//        [fr0]"i"(offsetof(context_t, fpu.fpr[0])),
//        [fr1]"i"(offsetof(context_t, fpu.fpr[1])),
//        [fr2]"i"(offsetof(context_t, fpu.fpr[2])),
//        [fr3]"i"(offsetof(context_t, fpu.fpr[3])),
//        [fr4]"i"(offsetof(context_t, fpu.fpr[4])),
//        [fr5]"i"(offsetof(context_t, fpu.fpr[5])),
//        [fr6]"i"(offsetof(context_t, fpu.fpr[6])),
//        [fr7]"i"(offsetof(context_t, fpu.fpr[7])),
//        [fr8]"i"(offsetof(context_t, fpu.fpr[8])),
//        [fr9]"i"(offsetof(context_t, fpu.fpr[9])),
//        [fr10]"i"(offsetof(context_t, fpu.fpr[10])),
//        [fr11]"i"(offsetof(context_t, fpu.fpr[11])),
//        [fr12]"i"(offsetof(context_t, fpu.fpr[12])),
//        [fr13]"i"(offsetof(context_t, fpu.fpr[13])),
//        [fr14]"i"(offsetof(context_t, fpu.fpr[14])),
//        [fr15]"i"(offsetof(context_t, fpu.fpr[15])),
//        [msrval]"i"(MSR_FP)
//    );
    asm volatile(
//        "stfd %%f16,%[fr16](%%r3)   \n\t"
//        "stfd %%f17,%[fr17](%%r3)   \n\t"
//        "stfd %%f18,%[fr18](%%r3)   \n\t"
//        "stfd %%f19,%[fr19](%%r3)   \n\t"
//        "stfd %%f20,%[fr20](%%r3)   \n\t"
//        "stfd %%f21,%[fr21](%%r3)   \n\t"
//        "stfd %%f22,%[fr22](%%r3)   \n\t"
//        "stfd %%f23,%[fr23](%%r3)   \n\t"
//        "stfd %%f24,%[fr24](%%r3)   \n\t"
//        "stfd %%f25,%[fr25](%%r3)   \n\t"
//        "stfd %%f26,%[fr26](%%r3)   \n\t"
//        "stfd %%f27,%[fr27](%%r3)   \n\t"
//        "stfd %%f28,%[fr28](%%r3)   \n\t"
//        "stfd %%f29,%[fr29](%%r3)   \n\t"
//        "stfd %%f30,%[fr30](%%r3)   \n\t"
//        "stfd %%f31,%[fr31](%%r3)   \n\t"
        "mr %%r28,%%r3              \n\t"
        "mr %%r29,%%r4              \n\t"
        "mr %%r30,%%r5              \n\t"
        "mtsrr0 %%r5                \n\t"
        "lis %%r9, %[msrval]@ha     \n\t"
        "ori %%r9,%%r9, %[msrval]@l \n\t"
        "mtsrr1 %%r9                \n\t"
        "sync; isync; rfi"
        ::
        [fr16]"i"(offsetof(context_t, fpu.fpr[16])),
        [fr17]"i"(offsetof(context_t, fpu.fpr[17])),
        [fr18]"i"(offsetof(context_t, fpu.fpr[18])),
        [fr19]"i"(offsetof(context_t, fpu.fpr[19])),
        [fr20]"i"(offsetof(context_t, fpu.fpr[20])),
        [fr21]"i"(offsetof(context_t, fpu.fpr[21])),
        [fr22]"i"(offsetof(context_t, fpu.fpr[22])),
        [fr23]"i"(offsetof(context_t, fpu.fpr[23])),
        [fr24]"i"(offsetof(context_t, fpu.fpr[24])),
        [fr25]"i"(offsetof(context_t, fpu.fpr[25])),
        [fr26]"i"(offsetof(context_t, fpu.fpr[26])),
        [fr27]"i"(offsetof(context_t, fpu.fpr[27])),
        [fr28]"i"(offsetof(context_t, fpu.fpr[28])),
        [fr29]"i"(offsetof(context_t, fpu.fpr[29])),
        [fr30]"i"(offsetof(context_t, fpu.fpr[30])),
        [fr31]"i"(offsetof(context_t, fpu.fpr[31])),
        [msrval]"i"(MSR_ME|MSR_FP|MSR_IS|MSR_DS)
    );
}
/*
 * Return from interrupt: restores the context passed as a parameter in the
 * %r3 register.
 */
static void __attribute__((used)) __core_LeaveInterrupt()
{
844 asm volatile(".section .aros.init,\"ax\"\n\t.align 5\n\t.globl core_LeaveInterrupt\n\t.type core_LeaveInterrupt,@function\n"
845 "core_LeaveInterrupt: \n\t"
846 "lwz %%r31,%[gpr31](%%r3) \n\t"
847 "lwz %%r30,%[gpr30](%%r3) \n\t"
848 "lwz %%r29,%[gpr29](%%r3) \n\t"
849 "lwz %%r28,%[gpr28](%%r3) \n\t"
850 "lwz %%r27,%[gpr27](%%r3) \n\t"
851 "lwz %%r26,%[gpr26](%%r3) \n\t"
852 "lwz %%r25,%[gpr25](%%r3) \n\t"
853 "lwz %%r24,%[gpr24](%%r3) \n\t"
854 "lwz %%r23,%[gpr23](%%r3) \n\t"
855 "lwz %%r22,%[gpr22](%%r3) \n\t"
856 "lwz %%r21,%[gpr21](%%r3) \n\t"
857 "lwz %%r20,%[gpr20](%%r3) \n\t"
858 "lwz %%r19,%[gpr19](%%r3) \n\t"
859 "lwz %%r18,%[gpr18](%%r3) \n\t"
860 "lwz %%r17,%[gpr17](%%r3) \n\t"
861 "lwz %%r16,%[gpr16](%%r3) \n\t"
862 "lwz %%r15,%[gpr15](%%r3) \n\t"
863 "lwz %%r14,%[gpr14](%%r3) \n\t"
864 "lwz %%r13,%[gpr13](%%r3) \n\t"
865 "lwz %%r12,%[gpr12](%%r3) \n\t"
867 [gpr12]"i"(offsetof(regs_t, gpr[12])),
868 [gpr13]"i"(offsetof(regs_t, gpr[13])),
869 [gpr14]"i"(offsetof(regs_t, gpr[14])),
870 [gpr15]"i"(offsetof(regs_t, gpr[15])),
871 [gpr16]"i"(offsetof(regs_t, gpr[16])),
872 [gpr17]"i"(offsetof(regs_t, gpr[17])),
873 [gpr18]"i"(offsetof(regs_t, gpr[18])),
874 [gpr19]"i"(offsetof(regs_t, gpr[19])),
875 [gpr20]"i"(offsetof(regs_t, gpr[20])),
876 [gpr21]"i"(offsetof(regs_t, gpr[21])),
877 [gpr22]"i"(offsetof(regs_t, gpr[22])),
878 [gpr23]"i"(offsetof(regs_t, gpr[23])),
879 [gpr24]"i"(offsetof(regs_t, gpr[24])),
880 [gpr25]"i"(offsetof(regs_t, gpr[25])),
881 [gpr26]"i"(offsetof(regs_t, gpr[26])),
882 [gpr27]"i"(offsetof(regs_t, gpr[27])),
883 [gpr28]"i"(offsetof(regs_t, gpr[28])),
884 [gpr29]"i"(offsetof(regs_t, gpr[29])),
885 [gpr30]"i"(offsetof(regs_t, gpr[30])),
886 [gpr31]"i"(offsetof(regs_t, gpr[31]))
//    asm volatile(
//        "lwz %%r0,%[fpscr](%%r3)    \n\t"
//        "mtfsf 255,%%r0             \n\t"
//        "lfd %%f0,%[fr0](%%r3)      \n\t"
//        "lfd %%f1,%[fr1](%%r3)      \n\t"
//        "lfd %%f2,%[fr2](%%r3)      \n\t"
//        "lfd %%f3,%[fr3](%%r3)      \n\t"
//        "lfd %%f4,%[fr4](%%r3)      \n\t"
//        "lfd %%f5,%[fr5](%%r3)      \n\t"
//        "lfd %%f6,%[fr6](%%r3)      \n\t"
//        "lfd %%f7,%[fr7](%%r3)      \n\t"
//        "lfd %%f8,%[fr8](%%r3)      \n\t"
//        "lfd %%f9,%[fr9](%%r3)      \n\t"
//        "lfd %%f10,%[fr10](%%r3)    \n\t"
//        "lfd %%f11,%[fr11](%%r3)    \n\t"
//        "lfd %%f12,%[fr12](%%r3)    \n\t"
//        "lfd %%f13,%[fr13](%%r3)    \n\t"
//        "lfd %%f14,%[fr14](%%r3)    \n\t"
//        "lfd %%f15,%[fr15](%%r3)    \n\t"
//        ::
//        [fpscr]"i"(offsetof(context_t, fpu.fpscr)),
//        [fr0]"i"(offsetof(context_t, fpu.fpr[0])),
//        [fr1]"i"(offsetof(context_t, fpu.fpr[1])),
//        [fr2]"i"(offsetof(context_t, fpu.fpr[2])),
//        [fr3]"i"(offsetof(context_t, fpu.fpr[3])),
//        [fr4]"i"(offsetof(context_t, fpu.fpr[4])),
//        [fr5]"i"(offsetof(context_t, fpu.fpr[5])),
//        [fr6]"i"(offsetof(context_t, fpu.fpr[6])),
//        [fr7]"i"(offsetof(context_t, fpu.fpr[7])),
//        [fr8]"i"(offsetof(context_t, fpu.fpr[8])),
//        [fr9]"i"(offsetof(context_t, fpu.fpr[9])),
//        [fr10]"i"(offsetof(context_t, fpu.fpr[10])),
//        [fr11]"i"(offsetof(context_t, fpu.fpr[11])),
//        [fr12]"i"(offsetof(context_t, fpu.fpr[12])),
//        [fr13]"i"(offsetof(context_t, fpu.fpr[13])),
//        [fr14]"i"(offsetof(context_t, fpu.fpr[14])),
//        [fr15]"i"(offsetof(context_t, fpu.fpr[15]))
//    );
//    asm volatile(
//        "lfd %%f16,%[fr16](%%r3)    \n\t"
//        "lfd %%f17,%[fr17](%%r3)    \n\t"
//        "lfd %%f18,%[fr18](%%r3)    \n\t"
//        "lfd %%f19,%[fr19](%%r3)    \n\t"
//        "lfd %%f20,%[fr20](%%r3)    \n\t"
//        "lfd %%f21,%[fr21](%%r3)    \n\t"
//        "lfd %%f22,%[fr22](%%r3)    \n\t"
//        "lfd %%f23,%[fr23](%%r3)    \n\t"
//        "lfd %%f24,%[fr24](%%r3)    \n\t"
//        "lfd %%f25,%[fr25](%%r3)    \n\t"
//        "lfd %%f26,%[fr26](%%r3)    \n\t"
//        "lfd %%f27,%[fr27](%%r3)    \n\t"
//        "lfd %%f28,%[fr28](%%r3)    \n\t"
//        "lfd %%f29,%[fr29](%%r3)    \n\t"
//        "lfd %%f30,%[fr30](%%r3)    \n\t"
//        "lfd %%f31,%[fr31](%%r3)    \n\t"
//        ::
//        [fr16]"i"(offsetof(context_t, fpu.fpr[16])),
//        [fr17]"i"(offsetof(context_t, fpu.fpr[17])),
//        [fr18]"i"(offsetof(context_t, fpu.fpr[18])),
//        [fr19]"i"(offsetof(context_t, fpu.fpr[19])),
//        [fr20]"i"(offsetof(context_t, fpu.fpr[20])),
//        [fr21]"i"(offsetof(context_t, fpu.fpr[21])),
//        [fr22]"i"(offsetof(context_t, fpu.fpr[22])),
//        [fr23]"i"(offsetof(context_t, fpu.fpr[23])),
//        [fr24]"i"(offsetof(context_t, fpu.fpr[24])),
//        [fr25]"i"(offsetof(context_t, fpu.fpr[25])),
//        [fr26]"i"(offsetof(context_t, fpu.fpr[26])),
//        [fr27]"i"(offsetof(context_t, fpu.fpr[27])),
//        [fr28]"i"(offsetof(context_t, fpu.fpr[28])),
//        [fr29]"i"(offsetof(context_t, fpu.fpr[29])),
//        [fr30]"i"(offsetof(context_t, fpu.fpr[30])),
//        [fr31]"i"(offsetof(context_t, fpu.fpr[31]))
//    );
    asm volatile(
        "lwz %%r11,%[gpr11](%%r3)   \n\t"
        "lwz %%r0,%[srr0](%%r3)     \n\t"
        "mtsrr0 %%r0                \n\t"
        "lwz %%r0,%[srr1](%%r3)     \n\t"
969 "rlwinm %%r0,%%r0,0,14,12 \n\t"
970 "mtsrr1 %%r0 \n\t"
971 "lwz %%r0,%[ctr](%%r3) \n\t"
972 "mtctr %%r0 \n\t"
973 "lwz %%r0,%[lr](%%r3) \n\t"
974 "mtlr %%r0 \n\t"
975 "lwz %%r0,%[xer](%%r3) \n\t"
976 "mtxer %%r0 \n\t"
977 "lwz %%r10,%[gpr10](%%r3) \n\t"
978 "lwz %%r9,%[gpr9](%%r3) \n\t"
979 "lwz %%r8,%[gpr8](%%r3) \n\t"
980 "lwz %%r7,%[gpr7](%%r3) \n\t"
981 "lwz %%r6,%[gpr6](%%r3) \n\t"
982 "lwz %%r5,%[gpr5](%%r3) \n\t"
983 "lwz %%r4,%[gpr4](%%r3) \n\t"
984 "lwz %%r0,%[gpr3](%%r3) \n\t"
985 "mtsprg1 %%r0 \n\t"
986 "lwz %%r2,%[gpr2](%%r3) \n\t"
987 "stwcx. %%r0,0,%%r1 \n\t"
988 "lwz %%r0,%[ccr](%%r3) \n\t"
989 "mtcr %%r0 \n\t"
990 "lwz %%r1,%[gpr1](%%r3) \n\t"
991 "lwz %%r0,%[gpr0](%%r3) \n\t"
992 "mfsprg1 %%r3 \n\t"
993 "sync; isync; rfi"
995 [ccr]"i"(offsetof(regs_t, ccr)), /* */
996 [srr0]"i"(offsetof(regs_t, srr0)), /* */
997 [srr1]"i"(offsetof(regs_t, srr1)),/* */
998 [ctr]"i"(offsetof(regs_t, ctr)),/**/
999 [lr]"i"(offsetof(regs_t, lr)),/**/
1000 [xer]"i"(offsetof(regs_t, xer)),
1001 [gpr0]"i"(offsetof(regs_t, gpr[0])),
1002 [gpr1]"i"(offsetof(regs_t, gpr[1])),
1003 [gpr2]"i"(offsetof(regs_t, gpr[2])),
1004 [gpr3]"i"(offsetof(regs_t, gpr[3])),
1005 [gpr4]"i"(offsetof(regs_t, gpr[4])),
1006 [gpr5]"i"(offsetof(regs_t, gpr[5])),
1007 [gpr6]"i"(offsetof(regs_t, gpr[6])),
1008 [gpr7]"i"(offsetof(regs_t, gpr[7])),
1009 [gpr8]"i"(offsetof(regs_t, gpr[8])),
1010 [gpr9]"i"(offsetof(regs_t, gpr[9])),
1011 [gpr10]"i"(offsetof(regs_t, gpr[10])),
1012 [gpr11]"i"(offsetof(regs_t, gpr[11]))