/*
    Copyright © 2013-2015, The AROS Development Team. All rights reserved.
*/
6 #include <aros/kernel.h>
7 #include <aros/libcall.h>
8 #include <exec/execbase.h>
9 #include <hardware/intbits.h>
10 #include <aros/arm/cpucontext.h>
13 #include <proto/kernel.h>
17 #include "kernel_intern.h"
18 #include "kernel_debug.h"
19 #include "kernel_cpu.h"
20 #include "kernel_syscall.h"
21 #include "kernel_scheduler.h"
22 #include "kernel_intr.h"
/* Idle task dispatched when no other task is ready to run (defined elsewhere). */
extern struct Task *sysIdleTask;

/*
 * Bitmask of CPU cores known to be online; the boot core is bit 0.
 * Kept in .data so it carries a defined initial value from the image.
 * NOTE(review): updated with a non-atomic read-modify-write from
 * cpu_Register() on each secondary core — racy if cores start
 * simultaneously; confirm start-up is serialised.
 */
uint32_t __arm_affinitymask __attribute__((section(".data"))) = 1;
31 " .globl mpcore_trampoline \n"
32 " .type mpcore_trampoline,%function \n"
33 "mpcore_trampoline: \n"
34 " ldr r3, mpcore_pde \n"
35 " mcr p15, 0, r3, c2, c0, 0 \n"
37 " mcr p15, 0, r3, c2, c0, 2 \n"
39 " mcr p15, 0, r3, c3, c0, 0 \n"
40 " mrc p15, 0, r4, c1, c0, 0 \n"
42 " mcr p15, 0, r3, c7, c10, 4 \n"
43 " orr r4, r4, #0x800000 \n"
45 " mcr p15, 0, r4, c1, c0, 0 \n"
46 " mcr p15, 0, r3, c7, c5, 4 \n"
48 " ldr sp, mpcore_data \n"
49 " ldr pc, mpcore_code \n"
51 " .globl mpcore_pde \n"
52 "mpcore_pde: .word 0 \n"
53 "mpcore_code: .word 0 \n"
54 "mpcore_data: .word 0 \n"
55 " .globl mpcore_end \n"
63 asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(tmp
));
64 tmp
|= (1 << 2) | (1 << 12) | (1 << 11); /* I and D caches, branch prediction */
65 tmp
= (tmp
& ~2) | (1 << 22); /* Unaligned access enable */
66 asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r"(tmp
));
68 cpu_Init(&__arm_arosintern
, NULL
);
70 asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp
));
72 __arm_affinitymask
|= (1 << (tmp
& 0x3));
74 bug("[KRN] Core %d up and waiting for interrupts\n", tmp
& 0x3);
76 for (;;) asm volatile("wfi");
79 void cpu_Delay(int usecs
)
82 for (delay
= 0; delay
< usecs
; delay
++) asm volatile ("mov r0, r0\n");
/*
 * VFP context save/restore entry points, implemented in the asm block
 * below.  buffer points at a VFP context area: d-registers at offset 0,
 * FPSCR at offset 256.  The VFP32 variants cover d0-d31, the VFP16
 * variants d0-d15.
 */
void cpu_Save_VFP16_State(void *buffer);
void cpu_Save_VFP32_State(void *buffer);
void cpu_Restore_VFP16_State(void *buffer);
void cpu_Restore_VFP32_State(void *buffer);
91 "cpu_Save_VFP16_State: \n"
93 " str r3, [r0, #256] \n"
94 " vstmia r0, {d0-d15} \n"
97 "cpu_Save_VFP32_State: \n"
99 " str r3, [r0, #256] \n"
100 " .word 0xec800b40 \n" // vstmia r0, {d0-d31}
103 "cpu_Restore_VFP16_State: \n"
104 " ldr r3, [r0, #256] \n"
106 " vldmia r0, {d0-d15} \n"
109 "cpu_Restore_VFP32_State: \n"
110 " ldr r3, [r0, #256] \n"
112 " .word 0xec900b20 \n" // vldmia r0, {d0-d31}
116 void cpu_Init_VFP_State(void *buffer
)
118 bzero(buffer
, sizeof(struct VFPContext
));
121 void cpu_Probe(struct ARM_Implementation
*krnARMImpl
)
125 asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (tmp
));
126 if ((tmp
& 0xfff0) == 0xc070)
128 krnARMImpl
->ARMI_Family
= 7;
130 krnARMImpl
->ARMI_Save_VFP_State
= &cpu_Save_VFP16_State
;
131 krnARMImpl
->ARMI_Restore_VFP_State
= &cpu_Restore_VFP16_State
;
133 // Read the Multiprocessor Affinity Register (MPIDR)
134 asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (tmp
));
144 krnARMImpl
->ARMI_Family
= 6;
145 krnARMImpl
->ARMI_Save_VFP_State
= &cpu_Save_VFP16_State
;
146 krnARMImpl
->ARMI_Restore_VFP_State
= &cpu_Restore_VFP16_State
;
149 krnARMImpl
->ARMI_Init_VFP_State
= &cpu_Init_VFP_State
;
150 krnARMImpl
->ARMI_Delay
= &cpu_Delay
;
153 void cpu_Init(struct ARM_Implementation
*krnARMImpl
, struct TagItem
*msg
)
155 register unsigned int fpuflags
;
159 /* Enable Vector Floating Point Calculations */
160 asm volatile("mrc p15,0,%[fpuflags],c1,c0,2\n" : [fpuflags
] "=r" (fpuflags
)); // Read Access Control Register
161 fpuflags
|= (VFPSingle
| VFPDouble
); // Enable Single & Double Precision
162 asm volatile("mcr p15,0,%[fpuflags],c1,c0,2\n" : : [fpuflags
] "r" (fpuflags
)); // Set Access Control Register
164 " mov %[fpuflags],%[vfpenable] \n" // Enable VFP
165 " fmxr fpexc,%[fpuflags] \n"
166 : [fpuflags
] "=r" (fpuflags
) : [vfpenable
] "I" (VFPEnable
));
169 void cpu_Switch(regs_t
*regs
)
173 D(bug("[Kernel] cpu_Switch()\n"));
175 task
= SysBase
->ThisTask
;
177 /* Copy current task's context into the ETask structure */
178 /* Restore the task's state */
179 STORE_TASKSTATE(task
, regs
)
181 if (__arm_arosintern
.ARMI_GetTime
)
183 /* Update the taks CPU time .. */
184 GetIntETask(task
)->iet_CpuTime
+= __arm_arosintern
.ARMI_GetTime() - GetIntETask(task
)->iet_private1
;
190 void cpu_Dispatch(regs_t
*regs
)
194 D(bug("[Kernel] cpu_Dispatch()\n"));
196 /* Break Disable() if needed */
197 if (SysBase
->IDNestCnt
>= 0) {
198 SysBase
->IDNestCnt
= -1;
199 ((uint32_t *)regs
)[13] &= ~0x80;
202 if (!(task
= core_Dispatch()))
205 D(bug("[Kernel] cpu_Dispatch: Letting '%s' run for a bit..\n", task
->tc_Node
.ln_Name
));
207 /* Restore the task's state */
208 RESTORE_TASKSTATE(task
, regs
)
210 DREGS(cpu_DumpRegs(regs
));
212 /* Handle tasks's flags */
213 if (task
->tc_Flags
& TF_EXCEPT
)
216 if (__arm_arosintern
.ARMI_GetTime
)
218 /* Store the launch time */
219 GetIntETask(task
)->iet_private1
= __arm_arosintern
.ARMI_GetTime();
222 if (task
->tc_Flags
& TF_LAUNCH
)
224 AROS_UFC1(void, task
->tc_Launch
,
225 AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
229 void cpu_DumpRegs(regs_t
*regs
)
233 bug("[KRN] Register Dump:\n");
234 for (i
= 0; i
< 12; i
++)
236 bug("[KRN] r%02d: 0x%08x\n", i
, ((uint32_t *)regs
)[i
]);
238 bug("[KRN] (ip) r12: 0x%08x\n", ((uint32_t *)regs
)[12]);
239 bug("[KRN] (sp) r13: 0x%08x\n", ((uint32_t *)regs
)[13]);
240 bug("[KRN] (lr) r14: 0x%08x\n", ((uint32_t *)regs
)[14]);
241 bug("[KRN] (pc) r15: 0x%08x\n", ((uint32_t *)regs
)[15]);
242 bug("[KRN] cpsr: 0x%08x\n", ((uint32_t *)regs
)[16]);