Use the emerging common part also for Efika
[AROS.git] / arch / ppc-chrp / efika / kernel / intr.c
bloba979833f2a5f99f9c9f02cd349c452862aa0790d
1 /*
2 * intr.c
4 * Created on: Aug 26, 2008
5 * Author: misc
6 */
8 #include <inttypes.h>
9 #include <aros/kernel.h>
10 #include <aros/libcall.h>
11 #include <asm/mpc5200b.h>
12 #include <stddef.h>
13 #include <string.h>
15 #include <proto/exec.h>
16 #include <proto/kernel.h>
18 #include "kernel_intern.h"
19 #include "syscall.h"
/*
 * Symbols exported by the exception-entry template (__exception_template at
 * the bottom of this file): the template's start address, its length, and the
 * byte offsets of the three immediates that init_interrupt() patches per
 * vector (handler address high/low halfwords and the exception number).
 */
extern void __tmpl_start();
extern uint32_t __tmpl_addr_lo;    /* offset of the "la %r5,addr@l" immediate */
extern uint32_t __tmpl_addr_hi;    /* offset of the "lis %r5,addr@ha" immediate */
extern uint32_t __tmpl_irq_num;    /* offset of the "li %r4,num" immediate */
extern uint32_t __tmpl_length;     /* template length in bytes */

static void init_interrupt(uint8_t num, void *handler);

/* C-level exception handlers; they never return normally — each one leaves
 * through core_LeaveInterrupt()/core_ExitInterrupt(). */
void __attribute__((noreturn)) program_handler(regs_t *ctx, uint8_t exception, void *self);
void __attribute__((noreturn)) generic_handler(regs_t *ctx, uint8_t exception, void *self);
void __attribute__((noreturn)) decrementer_handler(regs_t *ctx, uint8_t exception, void *self);
static void flush_cache(char *start, char *end);
/*
 * KrnAddExceptionHandler - install a C-level handler for a CPU exception.
 *
 * irq          - exception number (only values below 21 are accepted, matching
 *                the vectors set up in intr_init())
 * handler      - callback invoked with (ctx, handlerData, handlerData2)
 * handlerData,
 * handlerData2 - opaque user data passed to the callback
 *
 * Returns an opaque handle (to be passed to KrnRemExceptionHandler), or NULL
 * on failure / out-of-range exception number.
 */
AROS_LH4(void *, KrnAddExceptionHandler,
         AROS_LHA(uint8_t, irq, D0),
         AROS_LHA(void *, handler, A0),
         AROS_LHA(void *, handlerData, A1),
         AROS_LHA(void *, handlerData2, A2),
         struct KernelBase *, KernelBase, 14, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct ExceptNode *handle = NULL;
    D(bug("[KRN] KrnAddExceptionHandler(%02x, %012p, %012p, %012p):\n", irq, handler, handlerData, handlerData2));

    if (irq < 21)
    {
        /* Go to supervisor mode */
        goSuper();

        /* The node lives in supervisor memory, since it is touched from
         * exception context. */
        handle = Allocate(KernelBase->kb_SupervisorMem, sizeof(struct ExceptNode));
        D(bug("[KRN] handle=%012p\n", handle));

        if (handle)
        {
            handle->in_Handler = handler;
            handle->in_HandlerData = handlerData;
            handle->in_HandlerData2 = handlerData2;
            handle->in_type = it_exception;
            handle->in_nr = irq;

            /* List insertion must not race with the exception dispatchers */
            Disable();
            ADDHEAD(&KernelBase->kb_Exceptions[irq], &handle->in_Node);
            Enable();
        }

        goUser();
    }

    return handle;

    AROS_LIBFUNC_EXIT
}
74 AROS_LH1(void, KrnRemExceptionHandler,
75 AROS_LHA(void *, handle, A0),
76 struct KernelBase *, KernelBase, 15, Kernel)
78 AROS_LIBFUNC_INIT
80 struct ExecBase *SysBase = getSysBase();
81 struct ExceptNode *h = handle;
83 if (h && (h->in_type == it_exception))
85 goSuper();
87 Disable();
88 REMOVE(h);
89 Enable();
91 Deallocate(KernelBase->kb_SupervisorMem, h, sizeof(struct IntrNode));
93 goUser();
96 AROS_LIBFUNC_EXIT
/*
 * KrnAddIRQHandler - install a handler for an MPC5200B interrupt source.
 *
 * irq          - interrupt number (only values below 63 are accepted)
 * handler      - callback invoked with (ctx, handlerData, handlerData2)
 * handlerData,
 * handlerData2 - opaque user data passed to the callback
 *
 * The interrupt is unmasked in the interrupt controller as soon as the first
 * handler is attached. Returns an opaque handle for KrnRemIRQHandler(), or
 * NULL on failure / out-of-range interrupt number.
 */
AROS_LH4(void *, KrnAddIRQHandler,
         AROS_LHA(uint8_t, irq, D0),
         AROS_LHA(void *, handler, A0),
         AROS_LHA(void *, handlerData, A1),
         AROS_LHA(void *, handlerData2, A2),
         struct KernelBase *, KernelBase, 7, Kernel)
{
    AROS_LIBFUNC_INIT

    struct ExecBase *SysBase = getSysBase();
    struct IntrNode *handle = NULL;
    D(bug("[KRN] KrnAddIRQHandler(%02x, %012p, %012p, %012p):\n", irq, handler, handlerData, handlerData2));

    if (irq < 63)
    {
        /* Go to supervisor mode */
        goSuper();

        handle = Allocate(KernelBase->kb_SupervisorMem, sizeof(struct IntrNode));
        D(bug("[KRN] handle=%012p\n", handle));

        if (handle)
        {
            handle->in_Handler = handler;
            handle->in_HandlerData = handlerData;
            handle->in_HandlerData2 = handlerData2;
            handle->in_type = it_interrupt;
            handle->in_nr = irq;

            Disable();

            ADDHEAD(&KernelBase->kb_Interrupts[irq], &handle->in_Node);

            /* Unmask the source in the interrupt controller while still
             * inside the Disable()d section, so the first interrupt cannot
             * fire before the node is linked in. */
            ictl_enable_irq(irq);

            Enable();
        }

        goUser();
    }

    return handle;

    AROS_LIBFUNC_EXIT
}
147 AROS_LH1(void, KrnRemIRQHandler,
148 AROS_LHA(void *, handle, A0),
149 struct KernelBase *, KernelBase, 8, Kernel)
151 AROS_LIBFUNC_INIT
153 struct ExecBase *SysBase = getSysBase();
154 struct IntrNode *h = handle;
155 uint8_t irq = h->in_nr;
157 if (h && (h->in_type == it_interrupt))
159 goSuper();
161 Disable();
162 REMOVE(h);
163 if (IsListEmpty(&KernelBase->kb_Interrupts[irq]))
165 ictl_disable_irq(irq);
167 Enable();
169 Deallocate(KernelBase->kb_SupervisorMem, h, sizeof(struct IntrNode));
171 goUser();
174 AROS_LIBFUNC_EXIT
179 * G2 core and exceptions.
181 * The MPC5200B CPU just like most of the PowerPC family members, has two fixed
182 * locations for exception handlers: 0x0000_xxxx and 0xFFFF_xxxx. When an
183 * exception occurs, CPU calculates the entry address by shifting exception
184 * number by eight bits left, adds the base location and begins execution of the
185 * code at calculated address.
187 * For AROS it means that it has only 256 bytes (64 instructions) for an
188 * exception entry code. Therefore, just like all PowerPC operating systems,
189 * AROS performs a quick initialization of an exception, saves few registers,
190 * sets the exception number and determines the handler address and then it jumps
191 * to a common trampoline code. There, the rest of the CPU context is saved, MMU
192 * is activated and the handler routine written in C is called.
194 * Leaving the exception is performed through core_LeaveInterrupt() call which
195 * takes the CPU context as parameter.
197 * About MMU.
199 * There is some trouble related to MMU, location of exception handler and
200 * accessing data from there. PPC executes exception handlers in real mode, which
201 * means that the MMU translations are disabled completely. Therefore, going back
202 * to MMU-ed state is performed in two steps:
203 * 1. The MMU for DATA is turned on very early, because otherwise the exception
204 * handler wouldn't be able to access the supervisor stack
205 * 2. The MMU for CODE is turned on as late as possible. It happens when the C
206 * code is called. The Call is performed through the rfi instruction, which
207 * restores the old contents of MSR register (actually it is prepared by the
208 * asm part of exception handler) which in turn enables MMU for CODE.
/* Dedicated, hand-written entry code for the three software TLB-miss vectors
 * (instruction miss, data load miss, data store miss). init_interrupt()
 * copies these verbatim instead of the generic template. */
extern uint32_t __vector_imiss;
extern uint32_t __vector_dmiss;
extern uint32_t __vector_dmissw;
215 void intr_init()
217 D(bug("[KRN] Initializing exception handlers\n"));
219 init_interrupt( 1, generic_handler); /* RESET */
220 init_interrupt( 2, generic_handler); /* Machine check */
221 init_interrupt( 3, mmu_handler); /* DSI */
222 init_interrupt( 4, mmu_handler); /* ISI */
223 init_interrupt( 5, ictl_handler); /* External Intr */
224 init_interrupt( 6, generic_handler); /* Alignment */
225 init_interrupt( 7, program_handler); /* Program */
226 init_interrupt( 8, generic_handler); /* Floating point unavailable */
227 init_interrupt( 9, decrementer_handler);/* Decrementer */
228 init_interrupt(10, generic_handler); /* critical exception */
229 init_interrupt(12, syscall_handler); /* Syscall */
230 init_interrupt(13, generic_handler); /* Trace */
231 init_interrupt(16, generic_handler); /* Instruction translation miss */
232 init_interrupt(17, generic_handler); /* Data load translation miss */
233 init_interrupt(18, generic_handler); /* Data store translation miss */
234 init_interrupt(19, generic_handler); /* Instruction address breakpoint */
235 init_interrupt(20, ictl_handler); /* SMI */
/*
 * Initializer of an exception handler. It copies the template code into proper
 * location and adjusts two exception-dependent elements in the code - the
 * instruction which loads exception number: "li %r4,exception_number" and the
 * two instructions which load the address of a handler:
 * "lis %r5,handler_address@ha; la %r5, handler_address@l(%r5)"
 *
 * Once ready, data cache has to be flushed back into memory and the instruction
 * cache has to be invalidated.
 */
static void init_interrupt(uint8_t num, void *handler)
{
    if (num > 0 && num < 0x2f)
    {
        /* Exception entry points sit at fixed physical address num << 8
         * (256 bytes per vector, base 0x0000_0000). */
        intptr_t target = num << 8;

        /* The three software TLB-miss vectors get their dedicated,
         * hand-written routines copied in verbatim. */
        if (num == 16)
            memcpy((void*)target, &__vector_imiss, 256);
        else if (num == 17)
            memcpy((void*)target, &__vector_dmiss, 256);
        else if (num == 18)
            memcpy((void*)target, &__vector_dmissw, 256);
        else
        {
            memcpy((void*)target, __tmpl_start, __tmpl_length);

            /* Fix the exception number: patch the 16-bit immediate of the
             * "li %r4,..." instruction (offset points at the halfword). */
            *(uint16_t *)(target + __tmpl_irq_num) = num;

            /* Fix the handler address: patch the lis/la immediates */
            *(uint16_t *)(target + __tmpl_addr_lo) = (intptr_t)handler & 0x0000ffff;
            *(uint16_t *)(target + __tmpl_addr_hi) = (intptr_t)handler >> 16;

            /*
             * Adjustment of the lower halfword of address is done through "la"
             * instruction, which happens to be the same as addi:
             *
             * "la %reg1, offset(%reg2) <=> addi %reg1, %reg1, offset"
             *
             * If the offset is bigger than 32KB (thus seen by addi as a negative
             * number), increase the upper halfword by one.
             */
            if ((intptr_t)handler & 0x00008000)
                (*(uint16_t *)(target + __tmpl_addr_hi))++;
        }

        /* Flush the cache (whole 256-byte vector, end address inclusive) */
        flush_cache((char*)target, (char*)target + 0xff);
    }
}
289 /* Tiny routine to flush caches for a region of memory */
290 static void flush_cache(char *start, char *end)
292 start = (char*)((unsigned long)start & 0xffffffe0);
293 end = (char*)((unsigned long)end & 0xffffffe0);
294 char *ptr;
296 for (ptr = start; ptr < end; ptr +=32)
298 asm volatile("dcbst 0,%0"::"r"(ptr));
300 asm volatile("sync");
302 for (ptr = start; ptr < end; ptr +=32)
304 asm volatile("icbi 0,%0"::"r"(ptr));
307 asm volatile("sync; isync; ");
310 /* FPU handler */
311 void __attribute__((noreturn)) fpu_handler(context_t *ctx, uint8_t exception, void *self)
313 struct KernelBase *KernelBase = getKernelBase();
315 if (KernelBase)
317 if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
319 struct ExceptNode *in, *intemp;
321 ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
324 * call every handler tied to this exception.
326 if (in->in_Handler)
327 in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
332 core_LeaveInterrupt(ctx);
/* State of the CPU usage meter driven from decrementer_handler().
 * NOTE(review): defined elsewhere in the kernel — semantics inferred from
 * their use below; confirm against the defining module. */
extern uint64_t tbu1;        /* timebase snapshot, start of current idle span */
extern uint64_t tbu2;        /* timebase snapshot, end of current idle span */
extern uint64_t last_calc;   /* timebase value at the last usage computation */
extern uint64_t idle_time;   /* timebase ticks spent idle since last_calc */
extern uint32_t cpu_usage;   /* usage in tenths of a percent (0..1000) */
extern struct Task *idle_task;
/* Decrementer handler.
 *
 * Periodic tick: reloads the decrementer, dispatches registered exception
 * nodes, handles the Exec time-slice countdown and updates the CPU usage
 * meter. Never returns; leaves through core_ExitInterrupt() which may
 * reschedule.
 */
void __attribute__((noreturn)) decrementer_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();
//    static uint32_t cnt = 0;

    /* Reload the decrementer for the next tick: 33 MHz timebase / 100
     * gives a 100 Hz tick — TODO confirm the 33000000 timebase frequency
     * against board setup. */
    asm volatile("mtdec %0"::"r"(33000000/100));

    if (KernelBase)
    {
        if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
        {
            struct ExceptNode *in, *intemp;

            ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
            {
                /*
                 * call every handler tied to this exception.
                 */
                if (in->in_Handler)
                    in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
            }
        }
    }

    /* Exec time slice accounting: when the quantum runs out, request a
     * reschedule (flags checked by core_ExitInterrupt / the scheduler). */
    if (SysBase && SysBase->Elapsed)
    {
        if (--SysBase->Elapsed == 0)
        {
            SysBase->SysFlags |= 0x2000;
            SysBase->AttnResched |= 0x80;
        }
    }

    /* CPU usage meter. it should not be here, actually */
    uint64_t current = mftbu();
    if (current - last_calc > 33000000)   /* recompute roughly once a second */
    {
        /* NOTE(review): the delta is truncated to 32 bits and compared
         * against the 64-bit idle_time below — fine for ~1s windows, but
         * worth confirming. */
        uint32_t total_time = current - last_calc;

        /* If the idle task is running right now, account its current span */
        if (SysBase->ThisTask == idle_task)
        {
            tbu2 = mftbu();
            idle_time += tbu2 - tbu1;
            tbu1 = tbu2;
        }

        /* Clamp so the usage never goes negative */
        if (total_time < idle_time)
            total_time=idle_time;

        /* Usage in tenths of a percent: 1000 == fully busy */
        cpu_usage = 1000 - ((uint32_t)(idle_time))/(total_time /1000);

        D(bug("[KRN] CPU usage: %3d.%d\n", cpu_usage / 10, cpu_usage % 10));

        last_calc = current;
        idle_time = 0;
    }

    core_ExitInterrupt(ctx);
}
/* Program exception handler.
 *
 * Emulates two privileged mfspr instructions so user code can fetch
 * KernelBase/SysBase from SPRG4/SPRG5 without a real supervisor access, and
 * turns the unconditional trap instruction (0x7fe00008) into a debug
 * register dump. Anything else is forwarded to generic_handler().
 */
void __attribute__((noreturn)) program_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();
    int handled = 0;

    /* Fetch the faulting instruction (srr0 = address of the instruction
     * that raised the exception) */
    uint32_t insn = *(uint32_t *)ctx->srr0;

    /* Mask ignores the destination-register field (bits 21..25) */
    if ((insn & 0xfc1fffff) == 0x7c1442a6)        /* mfspr sprg4 */
    {
        /* Emulate: write KernelBase into the requested GPR, then step
         * over the instruction. */
        ctx->gpr[(insn >> 21) & 0x1f] = getKernelBase();
        ctx->srr0 += 4;
        core_LeaveInterrupt(ctx);
    }
    else if ((insn & 0xfc1fffff) == 0x7c1542a6)   /* mfspr sprg5 */
    {
        ctx->gpr[(insn >> 21) & 0x1f] = getSysBase();
        ctx->srr0 += 4;
        core_LeaveInterrupt(ctx);
    }
    else if (insn == 0x7fe00008)                  /* trap (tw 31,0,0) */
    {
        /* Debug trap: dump the full register state, then continue after
         * the trap instruction. */
        D(bug("[KRN] trap @ %08x (r3=%08x)\n", ctx->srr0, ctx->gpr[3]));
        if (SysBase)
        {
            struct Task *t = FindTask(NULL);
            D(bug("[KRN] %s %p (%s)\n", t->tc_Node.ln_Type == NT_TASK ? "Task":"Process", t, t->tc_Node.ln_Name ? t->tc_Node.ln_Name : "--unknown--"));

            D(bug("[KRN] SRR0=%08x, SRR1=%08x\n",ctx->srr0, ctx->srr1));
            D(bug("[KRN] CTR=%08x LR=%08x XER=%08x CCR=%08x\n", ctx->ctr, ctx->lr, ctx->xer, ctx->ccr));
            D(bug("[KRN] DAR=%08x DSISR=%08x\n", ctx->dar, ctx->dsisr));

            D(bug("[KRN] HASH1=%08x HASH2=%08x IMISS=%08x DMISS=%08x ICMP=%08x DCMP=%08x\n",
                  rdspr(978), rdspr(979), rdspr(980), rdspr(976), rdspr(981), rdspr(977)));

            D(bug("[KRN] SPRG0=%08x SPRG1=%08x SPRG2=%08x SPRG3=%08x SPRG4=%08x SPRG5=%08x\n",
                  rdspr(SPRG0),rdspr(SPRG1),rdspr(SPRG2),rdspr(SPRG3),rdspr(SPRG4),rdspr(SPRG5)));

            D(bug("[KRN] GPR00=%08x GPR01=%08x GPR02=%08x GPR03=%08x\n",
                  ctx->gpr[0],ctx->gpr[1],ctx->gpr[2],ctx->gpr[3]));
            D(bug("[KRN] GPR04=%08x GPR05=%08x GPR06=%08x GPR07=%08x\n",
                  ctx->gpr[4],ctx->gpr[5],ctx->gpr[6],ctx->gpr[7]));
            D(bug("[KRN] GPR08=%08x GPR09=%08x GPR10=%08x GPR11=%08x\n",
                  ctx->gpr[8],ctx->gpr[9],ctx->gpr[10],ctx->gpr[11]));
            D(bug("[KRN] GPR12=%08x GPR13=%08x GPR14=%08x GPR15=%08x\n",
                  ctx->gpr[12],ctx->gpr[13],ctx->gpr[14],ctx->gpr[15]));

            D(bug("[KRN] GPR16=%08x GPR17=%08x GPR18=%08x GPR19=%08x\n",
                  ctx->gpr[16],ctx->gpr[17],ctx->gpr[18],ctx->gpr[19]));
            D(bug("[KRN] GPR20=%08x GPR21=%08x GPR22=%08x GPR23=%08x\n",
                  ctx->gpr[20],ctx->gpr[21],ctx->gpr[22],ctx->gpr[23]));
            D(bug("[KRN] GPR24=%08x GPR25=%08x GPR26=%08x GPR27=%08x\n",
                  ctx->gpr[24],ctx->gpr[25],ctx->gpr[26],ctx->gpr[27]));
            D(bug("[KRN] GPR28=%08x GPR29=%08x GPR30=%08x GPR31=%08x\n",
                  ctx->gpr[28],ctx->gpr[29],ctx->gpr[30],ctx->gpr[31]));
        }

        /* Resume past the trap instruction */
        ctx->srr0 += 4;
        core_LeaveInterrupt(ctx);
    }
    else
        /* Not one of ours - treat as a plain program exception */
        generic_handler(ctx, exception, self);
}
/* Generic boring handler.
 *
 * Last-resort handler for exceptions with no dedicated logic: runs any
 * registered exception nodes, dumps the CPU state, and — unless some node
 * claimed the exception by returning non-zero — halts the CPU in a
 * power-save loop. Otherwise resumes the interrupted context.
 */
void __attribute__((noreturn)) generic_handler(regs_t *ctx, uint8_t exception, void *self)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct ExecBase *SysBase = getSysBase();
    int handled = 0;

    if (KernelBase)
    {
        if (!IsListEmpty(&KernelBase->kb_Exceptions[exception]))
        {
            struct ExceptNode *in, *intemp;

            ForeachNodeSafe(&KernelBase->kb_Exceptions[exception], in, intemp)
            {
                /*
                 * call every handler tied to this exception. If any of them
                 * returns a non-zero value, the exception is considered handled.
                 *
                 * If no handler will return zero, or there are no handlers at all,
                 * this generic handler will stop cpu.
                 */
                if (in->in_Handler)
                    handled |= in->in_Handler(ctx, in->in_HandlerData, in->in_HandlerData2);
            }
        }
    }

    D(bug("[KRN] Exception %d handler. Context @ %p, SysBase @ %p, KernelBase @ %p\n", exception, ctx, SysBase, KernelBase));
    if (SysBase)
    {
        struct Task *t = FindTask(NULL);
        D(bug("[KRN] %s %p (%s)\n", t->tc_Node.ln_Type == NT_TASK ? "Task":"Process", t, t->tc_Node.ln_Name ? t->tc_Node.ln_Name : "--unknown--"));

        D(bug("[KRN] SRR0=%08x, SRR1=%08x\n",ctx->srr0, ctx->srr1));
        D(bug("[KRN] CTR=%08x LR=%08x XER=%08x CCR=%08x\n", ctx->ctr, ctx->lr, ctx->xer, ctx->ccr));
        D(bug("[KRN] DAR=%08x DSISR=%08x\n", ctx->dar, ctx->dsisr));

        D(bug("[KRN] HASH1=%08x HASH2=%08x IMISS=%08x DMISS=%08x ICMP=%08x DCMP=%08x\n",
              rdspr(978), rdspr(979), rdspr(980), rdspr(976), rdspr(981), rdspr(977)));

        D(bug("[KRN] SPRG0=%08x SPRG1=%08x SPRG2=%08x SPRG3=%08x SPRG4=%08x SPRG5=%08x\n",
              rdspr(SPRG0),rdspr(SPRG1),rdspr(SPRG2),rdspr(SPRG3),rdspr(SPRG4),rdspr(SPRG5)));

        D(bug("[KRN] GPR00=%08x GPR01=%08x GPR02=%08x GPR03=%08x\n",
              ctx->gpr[0],ctx->gpr[1],ctx->gpr[2],ctx->gpr[3]));
        D(bug("[KRN] GPR04=%08x GPR05=%08x GPR06=%08x GPR07=%08x\n",
              ctx->gpr[4],ctx->gpr[5],ctx->gpr[6],ctx->gpr[7]));
        D(bug("[KRN] GPR08=%08x GPR09=%08x GPR10=%08x GPR11=%08x\n",
              ctx->gpr[8],ctx->gpr[9],ctx->gpr[10],ctx->gpr[11]));
        D(bug("[KRN] GPR12=%08x GPR13=%08x GPR14=%08x GPR15=%08x\n",
              ctx->gpr[12],ctx->gpr[13],ctx->gpr[14],ctx->gpr[15]));

        D(bug("[KRN] GPR16=%08x GPR17=%08x GPR18=%08x GPR19=%08x\n",
              ctx->gpr[16],ctx->gpr[17],ctx->gpr[18],ctx->gpr[19]));
        D(bug("[KRN] GPR20=%08x GPR21=%08x GPR22=%08x GPR23=%08x\n",
              ctx->gpr[20],ctx->gpr[21],ctx->gpr[22],ctx->gpr[23]));
        D(bug("[KRN] GPR24=%08x GPR25=%08x GPR26=%08x GPR27=%08x\n",
              ctx->gpr[24],ctx->gpr[25],ctx->gpr[26],ctx->gpr[27]));
        D(bug("[KRN] GPR28=%08x GPR29=%08x GPR30=%08x GPR31=%08x\n",
              ctx->gpr[28],ctx->gpr[29],ctx->gpr[30],ctx->gpr[31]));

        /* Dump the 8 instructions starting at the faulting address */
        D(bug("[KRN] Instruction dump:\n"));
        int i;
        ULONG *p = (ULONG*)ctx->srr0;
        for (i=0; i < 8; i++)
        {
            D(bug("[KRN] %08x: %08x\n", &p[i], p[i]));
        }
    }

    if (!handled)
    {
        D(bug("[KRN] **UNHANDLED EXCEPTION** stopping here...\n"));
        /* Halt: spin forever with the power-save MSR bit set */
        while(1) {
            wrmsr(rdmsr() | MSR_POW);
        }
    }

    core_LeaveInterrupt(ctx);
}
/*
 * Template code for an exception handler. Packed into a static void function
 * in order to make the assembler constrains usable.
 *
 * This code is never called directly: init_interrupt() memcpy()s it into a
 * 256-byte exception vector and patches the __irq_num / __addr_hi /
 * __addr_lo immediates per vector. It saves a minimal register set, builds
 * the context frame on the supervisor stack and jumps (via physical address)
 * to __EXCEPTION_Trampoline below.
 */
static void __attribute__((used)) __exception_template()
{
    /* Stage 1: save %r3 and CR into SPRGs, enable data translation (so the
     * supervisor stack is accessible) and point %r3 at a fresh context_t
     * frame — on the supervisor stack (SPRG0) if we came from user mode
     * (MSR[PR] set in SRR1), else on the current stack. */
    asm volatile(".globl __tmpl_start; .type __tmpl_start,@function\n"
        "__tmpl_start:          \n"
        "   mtsprg1 %%r3        \n"   /* save %r3 */
        "   mfcr %%r3           \n"   /* copy CR to %r3 */
        "   mtsprg3 %%r3        \n"   /* save %r3 */
        "   mfmsr %%r3          \n"
        "   ori %%r3,%%r3,%2    \n"   /* Enable address translation for data */
        "   mtmsr %%r3          \n"
        "   sync; isync         \n"
        "   mfsrr1 %%r3         \n"   /* srr1 (previous MSR) reg into %r3 */
        "   andi. %%r3,%%r3,%0  \n"   /* Was the PR bit set in MSR already? */
        "   beq- 1f             \n"   /* No, we were in supervisor mode */
        "   mfsprg0 %%r3        \n"   /* user mode case: SSP into %r3 */
        "   b 2f                \n"
        "1: mr %%r3,%%r1        \n"   /* Supervisor case: use current stack */
        "2: addi %%r3,%%r3,%1   \n"
        ::"i"(MSR_PR),"i"(-sizeof(context_t)),"i"(MSR_DS));

    /* Stage 2: store the volatile registers, SRR0/SRR1 and CR into the
     * frame, and load the per-vector immediates (patched by
     * init_interrupt): %r4 = exception number, %r5 = handler address. */
    asm volatile(
        "   stw %%r0, %[gpr0](%%r3) \n"   /* Store bunch of registers already. I could */
        "   stw %%r1, %[gpr1](%%r3) \n"   /* do it in common trampoline code, but it */
        "   stw %%r2, %[gpr2](%%r3) \n"   /* is much more sexy to do it here - this code */
        "   mfsprg1 %%r0            \n"   /* occupies in theory ZERO bytes in memory */
        "   stw %%r4, %[gpr4](%%r3) \n"   /* because the exception vector is 256 bytes long */
        "   stw %%r0, %[gpr3](%%r3) \n"   /* and shouldn't be used to anything else than */
        "   stw %%r5, %[gpr5](%%r3) \n"   /* exception handler anyway ;) */
        "   mfsprg3 %%r2            \n"
        "   mfsrr0 %%r0             \n"
        "   mfsrr1 %%r1             \n"
        "__addr_hi: lis %%r5, 0xdeadbeef@ha\n"      /* Load the address of an generic handler */
        "__addr_lo: la %%r5, 0xdeadbeef@l(%%r5)\n"  /* yes, load immediate sucks. Think about 64-bit PPC ;) */
        "__irq_num: li %%r4, 0x5a5a \n"             /* Load the exception number */
        "   stw %%r2,%[ccr](%%r3)   \n"
        "   stw %%r0,%[srr0](%%r3)  \n"
        "   stw %%r1,%[srr1](%%r3)  \n"
        "   mfctr %%r0              \n"
        "   mflr %%r1               \n"
        "   mfxer %%r2              \n"
        "   stw %%r0,%[ctr](%%r3)   \n"
        "   stw %%r1,%[lr](%%r3)    \n"
        "   stw %%r2,%[xer](%%r3)   \n"
        ::[gpr0]"i"(offsetof(regs_t, gpr[0])),
          [gpr1]"i"(offsetof(regs_t, gpr[1])),
          [gpr2]"i"(offsetof(regs_t, gpr[2])),
          [gpr3]"i"(offsetof(regs_t, gpr[3])),
          [gpr4]"i"(offsetof(regs_t, gpr[4])),
          [gpr5]"i"(offsetof(regs_t, gpr[5])),
          [ccr]"i"(offsetof(regs_t, ccr)),
          [srr0]"i"(offsetof(regs_t, srr0)),
          [srr1]"i"(offsetof(regs_t, srr1)),
          [ctr]"i"(offsetof(regs_t, ctr)),
          [lr]"i"(offsetof(regs_t, lr)),
          [xer]"i"(offsetof(regs_t, xer)));

    /*
     * Registers %r0 to %r5 are now saved together with CPU state. Go to the
     * trampoline code which will care about the rest. Adjust the stack frame pointer now,
     * or else it will be destroyed later by C code.
     */
    asm volatile("addi %r1,%r3,-16");

    /*
     * Go to the trampoline code. Use long call within whole 4GB addresspace in order to
     * avoid any trouble in future. Moreover use the PHYSICAL address since at this stage
     * MMU for code is still not running! If one would like to use MMU at this stage
     * already, we would have to make region 0x00000000-0x00003000 *EXECUTABLE* :))
     */
    asm volatile( "lis %r2,(__EXCEPTION_Trampoline - " STR(KERNEL_VIRT_BASE) " + " STR(KERNEL_PHYS_BASE) ")@ha;"
                  "la %r2,(__EXCEPTION_Trampoline - " STR(KERNEL_VIRT_BASE) " + " STR(KERNEL_PHYS_BASE) ")@l(%r2); mtctr %r2;");

    /* Jump to the trampoline code */
    asm volatile("bctr;");

    /*
     * Few variables: length of the code above and offsets used to fix the
     * exception number and handler address. The "2 +" skips the opcode
     * halfword so the offsets point directly at the 16-bit immediates
     * (big-endian instruction layout).
     */
    asm volatile("__tmpl_length: .long . - __tmpl_start\n");
    asm volatile("__tmpl_addr_lo: .long 2 + __addr_lo - __tmpl_start\n");
    asm volatile("__tmpl_addr_hi: .long 2 + __addr_hi - __tmpl_start\n");
    asm volatile("__tmpl_irq_num: .long 2 + __irq_num - __tmpl_start\n");
}
/*
 * Trampoline code is boring. It stores rest of the CPU context and prepares
 * everything for execution of the C code.
 *
 * The only interesting part is the jump to C routine which is done through the
 * rfi instruction (return from interrupt). I do it so because this way I may
 * enable the MMU for code and jump CPU to the desired address within one insn.
 *
 * On entry (from __exception_template): %r3 = context frame, %r4 = exception
 * number, %r5 = address of the C handler.
 */
static void __attribute__((used)) __EXCEPTION_Trampoline_template()
{
    /* Save the remaining integer registers %r6..%r31 into the frame */
    asm volatile(".section .aros.init,\"ax\"\n\t.align 5\n\t.globl __EXCEPTION_Trampoline\n\t.type __EXCEPTION_Trampoline,@function\n"
        "__EXCEPTION_Trampoline:    \n\t"
        "stw %%r6,%[gpr6](%%r3)     \n\t"
        "stw %%r7,%[gpr7](%%r3)     \n\t"
        "stw %%r8,%[gpr8](%%r3)     \n\t"
        "stw %%r9,%[gpr9](%%r3)     \n\t"
        "stw %%r10,%[gpr10](%%r3)   \n\t"
        "stw %%r11,%[gpr11](%%r3)   \n\t"
        "stw %%r12,%[gpr12](%%r3)   \n\t"
        "stw %%r13,%[gpr13](%%r3)   \n\t"
        "stw %%r14,%[gpr14](%%r3)   \n\t"
        "stw %%r15,%[gpr15](%%r3)   \n\t"
        "stw %%r16,%[gpr16](%%r3)   \n\t"
        "stw %%r17,%[gpr17](%%r3)   \n\t"
        "stw %%r18,%[gpr18](%%r3)   \n\t"
        "stw %%r19,%[gpr19](%%r3)   \n\t"
        "stw %%r20,%[gpr20](%%r3)   \n\t"
        "stw %%r21,%[gpr21](%%r3)   \n\t"
        "stw %%r22,%[gpr22](%%r3)   \n\t"
        "stw %%r23,%[gpr23](%%r3)   \n\t"
        "stw %%r24,%[gpr24](%%r3)   \n\t"
        "stw %%r25,%[gpr25](%%r3)   \n\t"
        "stw %%r26,%[gpr26](%%r3)   \n\t"
        "stw %%r27,%[gpr27](%%r3)   \n\t"
        "stw %%r28,%[gpr28](%%r3)   \n\t"
        "stw %%r29,%[gpr29](%%r3)   \n\t"
        "stw %%r30,%[gpr30](%%r3)   \n\t"
        "stw %%r31,%[gpr31](%%r3)   \n\t"
        ::
        [gpr6]"i"(offsetof(regs_t, gpr[6])),
        [gpr7]"i"(offsetof(regs_t, gpr[7])),
        [gpr8]"i"(offsetof(regs_t, gpr[8])),
        [gpr9]"i"(offsetof(regs_t, gpr[9])),
        [gpr10]"i"(offsetof(regs_t, gpr[10])),
        [gpr11]"i"(offsetof(regs_t, gpr[11])),
        [gpr12]"i"(offsetof(regs_t, gpr[12])),
        [gpr13]"i"(offsetof(regs_t, gpr[13])),
        [gpr14]"i"(offsetof(regs_t, gpr[14])),
        [gpr15]"i"(offsetof(regs_t, gpr[15])),
        [gpr16]"i"(offsetof(regs_t, gpr[16])),
        [gpr17]"i"(offsetof(regs_t, gpr[17])),
        [gpr18]"i"(offsetof(regs_t, gpr[18])),
        [gpr19]"i"(offsetof(regs_t, gpr[19])),
        [gpr20]"i"(offsetof(regs_t, gpr[20])),
        [gpr21]"i"(offsetof(regs_t, gpr[21])),
        [gpr22]"i"(offsetof(regs_t, gpr[22])),
        [gpr23]"i"(offsetof(regs_t, gpr[23])),
        [gpr24]"i"(offsetof(regs_t, gpr[24])),
        [gpr25]"i"(offsetof(regs_t, gpr[25])),
        [gpr26]"i"(offsetof(regs_t, gpr[26])),
        [gpr27]"i"(offsetof(regs_t, gpr[27])),
        [gpr28]"i"(offsetof(regs_t, gpr[28])),
        [gpr29]"i"(offsetof(regs_t, gpr[29])),
        [gpr30]"i"(offsetof(regs_t, gpr[30])),
        [gpr31]"i"(offsetof(regs_t, gpr[31]))
        );

    /* Enable the FPU (MSR[FP]) and save FPSCR plus %f0..%f15 */
    asm volatile(
        "mfmsr %%r0                 \n\t"
        "ori %%r0,%%r0, %[msrval]@l \n\t"
        "mtmsr %%r0; isync          \n\t"
        "stfd %%f0,%[fr0](%%r3)     \n\t"
        "mffs %%f0                  \n\t"
        "stfd %%f0,%[fpscr](%%r3)   \n\t"
        "stfd %%f1,%[fr1](%%r3)     \n\t"
        "stfd %%f2,%[fr2](%%r3)     \n\t"
        "stfd %%f3,%[fr3](%%r3)     \n\t"
        "stfd %%f4,%[fr4](%%r3)     \n\t"
        "stfd %%f5,%[fr5](%%r3)     \n\t"
        "stfd %%f6,%[fr6](%%r3)     \n\t"
        "stfd %%f7,%[fr7](%%r3)     \n\t"
        "stfd %%f8,%[fr8](%%r3)     \n\t"
        "stfd %%f9,%[fr9](%%r3)     \n\t"
        "stfd %%f10,%[fr10](%%r3)   \n\t"
        "stfd %%f11,%[fr11](%%r3)   \n\t"
        "stfd %%f12,%[fr12](%%r3)   \n\t"
        "stfd %%f13,%[fr13](%%r3)   \n\t"
        "stfd %%f14,%[fr14](%%r3)   \n\t"
        "stfd %%f15,%[fr15](%%r3)   \n\t"
        ::
        [fpscr]"i"(offsetof(context_t, fpu.fpscr)),
        [fr0]"i"(offsetof(context_t, fpu.fpr[0])),
        [fr1]"i"(offsetof(context_t, fpu.fpr[1])),
        [fr2]"i"(offsetof(context_t, fpu.fpr[2])),
        [fr3]"i"(offsetof(context_t, fpu.fpr[3])),
        [fr4]"i"(offsetof(context_t, fpu.fpr[4])),
        [fr5]"i"(offsetof(context_t, fpu.fpr[5])),
        [fr6]"i"(offsetof(context_t, fpu.fpr[6])),
        [fr7]"i"(offsetof(context_t, fpu.fpr[7])),
        [fr8]"i"(offsetof(context_t, fpu.fpr[8])),
        [fr9]"i"(offsetof(context_t, fpu.fpr[9])),
        [fr10]"i"(offsetof(context_t, fpu.fpr[10])),
        [fr11]"i"(offsetof(context_t, fpu.fpr[11])),
        [fr12]"i"(offsetof(context_t, fpu.fpr[12])),
        [fr13]"i"(offsetof(context_t, fpu.fpr[13])),
        [fr14]"i"(offsetof(context_t, fpu.fpr[14])),
        [fr15]"i"(offsetof(context_t, fpu.fpr[15])),
        [msrval]"i"(MSR_FP)
        );

    /* Save %f16..%f31, keep the handler arguments in non-volatile registers,
     * then "call" the C handler via rfi: SRR0 = handler address (%r5),
     * SRR1 = MSR with machine check, FPU and instruction/data translation
     * enabled — so the rfi lands in the handler with the MMU on. */
    asm volatile(
        "stfd %%f16,%[fr16](%%r3)   \n\t"
        "stfd %%f17,%[fr17](%%r3)   \n\t"
        "stfd %%f18,%[fr18](%%r3)   \n\t"
        "stfd %%f19,%[fr19](%%r3)   \n\t"
        "stfd %%f20,%[fr20](%%r3)   \n\t"
        "stfd %%f21,%[fr21](%%r3)   \n\t"
        "stfd %%f22,%[fr22](%%r3)   \n\t"
        "stfd %%f23,%[fr23](%%r3)   \n\t"
        "stfd %%f24,%[fr24](%%r3)   \n\t"
        "stfd %%f25,%[fr25](%%r3)   \n\t"
        "stfd %%f26,%[fr26](%%r3)   \n\t"
        "stfd %%f27,%[fr27](%%r3)   \n\t"
        "stfd %%f28,%[fr28](%%r3)   \n\t"
        "stfd %%f29,%[fr29](%%r3)   \n\t"
        "stfd %%f30,%[fr30](%%r3)   \n\t"
        "stfd %%f31,%[fr31](%%r3)   \n\t"
        "mr %%r28,%%r3              \n\t"
        "mr %%r29,%%r4              \n\t"
        "mr %%r30,%%r5              \n\t"
        "mtsrr0 %%r5                \n\t"
        "lis %%r9, %[msrval]@ha     \n\t"
        "ori %%r9,%%r9, %[msrval]@l \n\t"
        "mtsrr1 %%r9                \n\t"
        "sync; isync; rfi"
        ::
        [fr16]"i"(offsetof(context_t, fpu.fpr[16])),
        [fr17]"i"(offsetof(context_t, fpu.fpr[17])),
        [fr18]"i"(offsetof(context_t, fpu.fpr[18])),
        [fr19]"i"(offsetof(context_t, fpu.fpr[19])),
        [fr20]"i"(offsetof(context_t, fpu.fpr[20])),
        [fr21]"i"(offsetof(context_t, fpu.fpr[21])),
        [fr22]"i"(offsetof(context_t, fpu.fpr[22])),
        [fr23]"i"(offsetof(context_t, fpu.fpr[23])),
        [fr24]"i"(offsetof(context_t, fpu.fpr[24])),
        [fr25]"i"(offsetof(context_t, fpu.fpr[25])),
        [fr26]"i"(offsetof(context_t, fpu.fpr[26])),
        [fr27]"i"(offsetof(context_t, fpu.fpr[27])),
        [fr28]"i"(offsetof(context_t, fpu.fpr[28])),
        [fr29]"i"(offsetof(context_t, fpu.fpr[29])),
        [fr30]"i"(offsetof(context_t, fpu.fpr[30])),
        [fr31]"i"(offsetof(context_t, fpu.fpr[31])),
        [msrval]"i"(MSR_ME|MSR_FP|MSR_IS|MSR_DS)
        );
}
/*
 * Return from interrupt - restores the context passed as a parameter in %r3
 * register.
 *
 * Mirror image of the save path above: reloads the non-volatile and FP
 * registers, the special-purpose registers and finally the volatile
 * registers, ending with an rfi back to the interrupted code.
 */
static void __attribute__((used)) __core_LeaveInterrupt()
{
    /* Restore the non-volatile integer registers %r12..%r31 */
    asm volatile(".section .aros.init,\"ax\"\n\t.align 5\n\t.globl core_LeaveInterrupt\n\t.type core_LeaveInterrupt,@function\n"
        "core_LeaveInterrupt:       \n\t"
        "lwz %%r31,%[gpr31](%%r3)   \n\t"
        "lwz %%r30,%[gpr30](%%r3)   \n\t"
        "lwz %%r29,%[gpr29](%%r3)   \n\t"
        "lwz %%r28,%[gpr28](%%r3)   \n\t"
        "lwz %%r27,%[gpr27](%%r3)   \n\t"
        "lwz %%r26,%[gpr26](%%r3)   \n\t"
        "lwz %%r25,%[gpr25](%%r3)   \n\t"
        "lwz %%r24,%[gpr24](%%r3)   \n\t"
        "lwz %%r23,%[gpr23](%%r3)   \n\t"
        "lwz %%r22,%[gpr22](%%r3)   \n\t"
        "lwz %%r21,%[gpr21](%%r3)   \n\t"
        "lwz %%r20,%[gpr20](%%r3)   \n\t"
        "lwz %%r19,%[gpr19](%%r3)   \n\t"
        "lwz %%r18,%[gpr18](%%r3)   \n\t"
        "lwz %%r17,%[gpr17](%%r3)   \n\t"
        "lwz %%r16,%[gpr16](%%r3)   \n\t"
        "lwz %%r15,%[gpr15](%%r3)   \n\t"
        "lwz %%r14,%[gpr14](%%r3)   \n\t"
        "lwz %%r13,%[gpr13](%%r3)   \n\t"
        "lwz %%r12,%[gpr12](%%r3)   \n\t"
        ::
        [gpr12]"i"(offsetof(regs_t, gpr[12])),
        [gpr13]"i"(offsetof(regs_t, gpr[13])),
        [gpr14]"i"(offsetof(regs_t, gpr[14])),
        [gpr15]"i"(offsetof(regs_t, gpr[15])),
        [gpr16]"i"(offsetof(regs_t, gpr[16])),
        [gpr17]"i"(offsetof(regs_t, gpr[17])),
        [gpr18]"i"(offsetof(regs_t, gpr[18])),
        [gpr19]"i"(offsetof(regs_t, gpr[19])),
        [gpr20]"i"(offsetof(regs_t, gpr[20])),
        [gpr21]"i"(offsetof(regs_t, gpr[21])),
        [gpr22]"i"(offsetof(regs_t, gpr[22])),
        [gpr23]"i"(offsetof(regs_t, gpr[23])),
        [gpr24]"i"(offsetof(regs_t, gpr[24])),
        [gpr25]"i"(offsetof(regs_t, gpr[25])),
        [gpr26]"i"(offsetof(regs_t, gpr[26])),
        [gpr27]"i"(offsetof(regs_t, gpr[27])),
        [gpr28]"i"(offsetof(regs_t, gpr[28])),
        [gpr29]"i"(offsetof(regs_t, gpr[29])),
        [gpr30]"i"(offsetof(regs_t, gpr[30])),
        [gpr31]"i"(offsetof(regs_t, gpr[31]))
        );

    /* Restore FPSCR (via %f0) and %f0..%f15 */
    asm volatile(
        "lfd %%f0,%[fpscr](%%r3)    \n\t"
        "mtfsf 255,%%f0             \n\t"
        "lfd %%f0,%[fr0](%%r3)      \n\t"
        "lfd %%f1,%[fr1](%%r3)      \n\t"
        "lfd %%f2,%[fr2](%%r3)      \n\t"
        "lfd %%f3,%[fr3](%%r3)      \n\t"
        "lfd %%f4,%[fr4](%%r3)      \n\t"
        "lfd %%f5,%[fr5](%%r3)      \n\t"
        "lfd %%f6,%[fr6](%%r3)      \n\t"
        "lfd %%f7,%[fr7](%%r3)      \n\t"
        "lfd %%f8,%[fr8](%%r3)      \n\t"
        "lfd %%f9,%[fr9](%%r3)      \n\t"
        "lfd %%f10,%[fr10](%%r3)    \n\t"
        "lfd %%f11,%[fr11](%%r3)    \n\t"
        "lfd %%f12,%[fr12](%%r3)    \n\t"
        "lfd %%f13,%[fr13](%%r3)    \n\t"
        "lfd %%f14,%[fr14](%%r3)    \n\t"
        "lfd %%f15,%[fr15](%%r3)    \n\t"
        ::
        [fpscr]"i"(offsetof(context_t, fpu.fpscr)),
        [fr0]"i"(offsetof(context_t, fpu.fpr[0])),
        [fr1]"i"(offsetof(context_t, fpu.fpr[1])),
        [fr2]"i"(offsetof(context_t, fpu.fpr[2])),
        [fr3]"i"(offsetof(context_t, fpu.fpr[3])),
        [fr4]"i"(offsetof(context_t, fpu.fpr[4])),
        [fr5]"i"(offsetof(context_t, fpu.fpr[5])),
        [fr6]"i"(offsetof(context_t, fpu.fpr[6])),
        [fr7]"i"(offsetof(context_t, fpu.fpr[7])),
        [fr8]"i"(offsetof(context_t, fpu.fpr[8])),
        [fr9]"i"(offsetof(context_t, fpu.fpr[9])),
        [fr10]"i"(offsetof(context_t, fpu.fpr[10])),
        [fr11]"i"(offsetof(context_t, fpu.fpr[11])),
        [fr12]"i"(offsetof(context_t, fpu.fpr[12])),
        [fr13]"i"(offsetof(context_t, fpu.fpr[13])),
        [fr14]"i"(offsetof(context_t, fpu.fpr[14])),
        [fr15]"i"(offsetof(context_t, fpu.fpr[15]))
        );

    /* Restore %f16..%f31 */
    asm volatile(
        "lfd %%f16,%[fr16](%%r3)    \n\t"
        "lfd %%f17,%[fr17](%%r3)    \n\t"
        "lfd %%f18,%[fr18](%%r3)    \n\t"
        "lfd %%f19,%[fr19](%%r3)    \n\t"
        "lfd %%f20,%[fr20](%%r3)    \n\t"
        "lfd %%f21,%[fr21](%%r3)    \n\t"
        "lfd %%f22,%[fr22](%%r3)    \n\t"
        "lfd %%f23,%[fr23](%%r3)    \n\t"
        "lfd %%f24,%[fr24](%%r3)    \n\t"
        "lfd %%f25,%[fr25](%%r3)    \n\t"
        "lfd %%f26,%[fr26](%%r3)    \n\t"
        "lfd %%f27,%[fr27](%%r3)    \n\t"
        "lfd %%f28,%[fr28](%%r3)    \n\t"
        "lfd %%f29,%[fr29](%%r3)    \n\t"
        "lfd %%f30,%[fr30](%%r3)    \n\t"
        "lfd %%f31,%[fr31](%%r3)    \n\t"
        ::
        [fr16]"i"(offsetof(context_t, fpu.fpr[16])),
        [fr17]"i"(offsetof(context_t, fpu.fpr[17])),
        [fr18]"i"(offsetof(context_t, fpu.fpr[18])),
        [fr19]"i"(offsetof(context_t, fpu.fpr[19])),
        [fr20]"i"(offsetof(context_t, fpu.fpr[20])),
        [fr21]"i"(offsetof(context_t, fpu.fpr[21])),
        [fr22]"i"(offsetof(context_t, fpu.fpr[22])),
        [fr23]"i"(offsetof(context_t, fpu.fpr[23])),
        [fr24]"i"(offsetof(context_t, fpu.fpr[24])),
        [fr25]"i"(offsetof(context_t, fpu.fpr[25])),
        [fr26]"i"(offsetof(context_t, fpu.fpr[26])),
        [fr27]"i"(offsetof(context_t, fpu.fpr[27])),
        [fr28]"i"(offsetof(context_t, fpu.fpr[28])),
        [fr29]"i"(offsetof(context_t, fpu.fpr[29])),
        [fr30]"i"(offsetof(context_t, fpu.fpr[30])),
        [fr31]"i"(offsetof(context_t, fpu.fpr[31]))
        );

    /* Restore SRR0/SRR1 (the rlwinm clears one MSR bit from the saved
     * srr1 — NOTE(review): mask 0,14,12 keeps all bits except bit 13,
     * presumably MSR[POW]; confirm against the G2 MSR layout), the
     * remaining SPRs and volatile registers, then rfi back. The stwcx.
     * cancels any outstanding lwarx reservation — TODO confirm. %r3 is
     * restored last, bounced through SPRG1. */
    asm volatile(
        "lwz %%r11,%[gpr11](%%r3)   \n\t"
        "lwz %%r0,%[srr0](%%r3)     \n\t"
        "mtsrr0 %%r0                \n\t"
        "lwz %%r0,%[srr1](%%r3)     \n\t"
        "rlwinm %%r0,%%r0,0,14,12   \n\t"
        "mtsrr1 %%r0                \n\t"
        "lwz %%r0,%[ctr](%%r3)      \n\t"
        "mtctr %%r0                 \n\t"
        "lwz %%r0,%[lr](%%r3)       \n\t"
        "mtlr %%r0                  \n\t"
        "lwz %%r0,%[xer](%%r3)      \n\t"
        "mtxer %%r0                 \n\t"
        "lwz %%r10,%[gpr10](%%r3)   \n\t"
        "lwz %%r9,%[gpr9](%%r3)     \n\t"
        "lwz %%r8,%[gpr8](%%r3)     \n\t"
        "lwz %%r7,%[gpr7](%%r3)     \n\t"
        "lwz %%r6,%[gpr6](%%r3)     \n\t"
        "lwz %%r5,%[gpr5](%%r3)     \n\t"
        "lwz %%r4,%[gpr4](%%r3)     \n\t"
        "lwz %%r0,%[gpr3](%%r3)     \n\t"
        "mtsprg1 %%r0               \n\t"
        "lwz %%r2,%[gpr2](%%r3)     \n\t"
        "stwcx. %%r0,0,%%r1         \n\t"
        "lwz %%r0,%[ccr](%%r3)      \n\t"
        "mtcr %%r0                  \n\t"
        "lwz %%r1,%[gpr1](%%r3)     \n\t"
        "lwz %%r0,%[gpr0](%%r3)     \n\t"
        "mfsprg1 %%r3               \n\t"
        "sync; isync; rfi"
        ::
        [ccr]"i"(offsetof(regs_t, ccr)),
        [srr0]"i"(offsetof(regs_t, srr0)),
        [srr1]"i"(offsetof(regs_t, srr1)),
        [ctr]"i"(offsetof(regs_t, ctr)),
        [lr]"i"(offsetof(regs_t, lr)),
        [xer]"i"(offsetof(regs_t, xer)),
        [gpr0]"i"(offsetof(regs_t, gpr[0])),
        [gpr1]"i"(offsetof(regs_t, gpr[1])),
        [gpr2]"i"(offsetof(regs_t, gpr[2])),
        [gpr3]"i"(offsetof(regs_t, gpr[3])),
        [gpr4]"i"(offsetof(regs_t, gpr[4])),
        [gpr5]"i"(offsetof(regs_t, gpr[5])),
        [gpr6]"i"(offsetof(regs_t, gpr[6])),
        [gpr7]"i"(offsetof(regs_t, gpr[7])),
        [gpr8]"i"(offsetof(regs_t, gpr[8])),
        [gpr9]"i"(offsetof(regs_t, gpr[9])),
        [gpr10]"i"(offsetof(regs_t, gpr[10])),
        [gpr11]"i"(offsetof(regs_t, gpr[11]))
        );
}