/*
    Copyright © 2013-2015, The AROS Development Team. All rights reserved.
*/
#include <exec_platform.h>

#include <aros/kernel.h>
#include <aros/libcall.h>
#include <exec/execbase.h>
#include <hardware/intbits.h>
#include <aros/arm/cpucontext.h>

#include "kernel_base.h"

#include <proto/kernel.h>

#include "kernel_intern.h"
#include "kernel_debug.h"
#include "kernel_cpu.h"
#include <kernel_objects.h>
#include "kernel_syscall.h"
#include "kernel_scheduler.h"
#include "kernel_intr.h"
32 extern struct Task
*sysIdleTask
;
33 uint32_t __arm_affinitymask
__attribute__((section(".data"))) = 1;
35 extern BOOL
Exec_InitETask(struct Task
*, struct ExecBase
*);
38 " .globl mpcore_trampoline \n"
39 " .type mpcore_trampoline,%function \n"
40 "mpcore_trampoline: \n"
41 " ldr r3, mpcore_pde \n"
42 " mcr p15, 0, r3, c2, c0, 0 \n"
44 " mcr p15, 0, r3, c2, c0, 2 \n"
46 " mcr p15, 0, r3, c3, c0, 0 \n"
47 " mrc p15, 0, r4, c1, c0, 0 \n"
49 " mcr p15, 0, r3, c7, c10, 4 \n"
50 " orr r4, r4, #0x800000 \n"
52 " mcr p15, 0, r4, c1, c0, 0 \n"
53 " mcr p15, 0, r3, c7, c5, 4 \n"
55 " ldr sp, mpcore_stack \n"
56 " ldr r3, mpcore_tls \n"
57 " mcr p15, 0, r3, c13, c0, 3 \n"
58 " ldr pc, mpcore_code \n"
60 " .globl mpcore_pde \n"
61 "mpcore_pde: .word 0 \n"
62 "mpcore_code: .word 0 \n"
63 "mpcore_stack: .word 0 \n"
64 "mpcore_tls: .word 0 \n"
65 " .globl mpcore_end \n"
72 #if defined(__AROSEXEC_SMP__)
74 struct ExecBase
*SysBase
;
75 struct KernelBase
*KernelBase
;
78 struct ExceptionContext
*ctx
;
81 asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(tmp
));
82 tmp
|= (1 << 2) | (1 << 12) | (1 << 11); /* I and D caches, branch prediction */
83 tmp
= (tmp
& ~2) | (1 << 22); /* Unaligned access enable */
84 asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r"(tmp
));
86 cpu_Init(&__arm_arosintern
, NULL
);
88 asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp
));
90 #if defined(__AROSEXEC_SMP__)
91 asm volatile (" mrc p15, 0, %0, c13, c0, 3 " : "=r" (__tls
));
93 /* Now we are ready to boostrap and launch the schedular */
94 bug("[KRN] Core %d Boostrapping..\n", (tmp
& 0x3));
95 bug("[KRN] Core %d TLS @ 0x%p\n", (tmp
& 0x3), (__tls
));
96 KernelBase
= __tls
->KernelBase
; // TLS_GET(KernelBase)
97 SysBase
= __tls
->SysBase
; // TLS_GET(SysBase)
98 bug("[KRN] Core %d KernelBase @ 0x%p\n", (tmp
& 0x3), KernelBase
);
99 bug("[KRN] Core %d SysBase @ 0x%p\n", (tmp
& 0x3), SysBase
);
101 t
= AllocMem(sizeof(struct Task
), MEMF_PUBLIC
|MEMF_CLEAR
);
102 ml
= AllocMem(sizeof(struct MemList
), MEMF_PUBLIC
|MEMF_CLEAR
);
106 bug("[KRN] Core %d FATAL : Failed to allocate memory for bootstrap task!", (tmp
& 0x3));
107 goto cpu_registerfatal
;
110 bug("[KRN] Core %d Bootstrap task @ 0x%p\n", (tmp
& 0x3), t
);
111 bug("[KRN] Core %d cpu context size %d\n", (tmp
& 0x3), KernelBase
->kb_ContextSize
);
113 ctx
= KrnCreateContext();
116 bug("[KRN] Core %d FATAL : Failed to create the boostrap task context!\n", (tmp
& 0x3));
117 goto cpu_registerfatal
;
120 bug("[KRN] Core %d cpu ctx @ 0x%p\n", (tmp
& 0x3), ctx
);
122 NEWLIST(&t
->tc_MemEntry
);
124 t
->tc_Node
.ln_Name
= AllocVec(20, MEMF_CLEAR
);
125 sprintf( t
->tc_Node
.ln_Name
, "Core(%d) Bootstrap", (tmp
& 0x3));
126 t
->tc_Node
.ln_Type
= NT_TASK
;
127 t
->tc_Node
.ln_Pri
= 0;
128 t
->tc_State
= TS_RUN
;
129 t
->tc_SigAlloc
= 0xFFFF;
131 /* Build bootstraps memory list */
132 ml
->ml_NumEntries
= 1;
133 ml
->ml_ME
[0].me_Addr
= t
;
134 ml
->ml_ME
[0].me_Length
= sizeof(struct Task
);
135 AddHead(&t
->tc_MemEntry
, &ml
->ml_Node
);
137 /* Create a ETask structure and attach CPU context */
138 if (!Exec_InitETask(t
, SysBase
))
140 bug("[KRN] Core %d FATAL : Failed to allocate memory for boostrap extended data!\n", (tmp
& 0x3));
141 goto cpu_registerfatal
;
143 t
->tc_UnionETask
.tc_ETask
->et_RegFrame
= ctx
;
145 /* This Bootstrap task can run only on one of the available cores */
146 GetIntETask(t
->tc_UnionETask
.tc_ETask
)->iet_CpuNumber
= (tmp
& 0x3);
147 GetIntETask(t
->tc_UnionETask
.tc_ETask
)->iet_CpuAffinity
= 1 << (tmp
& 0x3);
153 bug("[KRN] Core %d operational\n", (tmp
& 0x3));
155 __arm_affinitymask
|= (1 << (tmp
& 0x3));
159 bug("[KRN] Core %d waiting for interrupts\n", (tmp
& 0x3));
161 for (;;) asm volatile("wfi");
164 void cpu_Delay(int usecs
)
167 for (delay
= 0; delay
< usecs
; delay
++) asm volatile ("mov r0, r0\n");
170 void cpu_Save_VFP16_State(void *buffer
);
171 void cpu_Save_VFP32_State(void *buffer
);
172 void cpu_Restore_VFP16_State(void *buffer
);
173 void cpu_Restore_VFP32_State(void *buffer
);
176 "cpu_Save_VFP16_State: \n"
178 " str r3, [r0, #256] \n"
179 " vstmia r0, {d0-d15} \n"
182 "cpu_Save_VFP32_State: \n"
184 " str r3, [r0, #256] \n"
185 " .word 0xec800b40 \n" // vstmia r0, {d0-d31}
188 "cpu_Restore_VFP16_State: \n"
189 " ldr r3, [r0, #256] \n"
191 " vldmia r0, {d0-d15} \n"
194 "cpu_Restore_VFP32_State: \n"
195 " ldr r3, [r0, #256] \n"
197 " .word 0xec900b20 \n" // vldmia r0, {d0-d31}
201 void cpu_Init_VFP_State(void *buffer
)
203 bzero(buffer
, sizeof(struct VFPContext
));
206 void cpu_Probe(struct ARM_Implementation
*krnARMImpl
)
210 asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (tmp
));
211 if ((tmp
& 0xfff0) == 0xc070)
213 krnARMImpl
->ARMI_Family
= 7;
215 krnARMImpl
->ARMI_Save_VFP_State
= &cpu_Save_VFP16_State
;
216 krnARMImpl
->ARMI_Restore_VFP_State
= &cpu_Restore_VFP16_State
;
218 #if defined(__AROSEXEC_SMP__)
219 // Read the Multiprocessor Affinity Register (MPIDR)
220 asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (tmp
));
230 krnARMImpl
->ARMI_Family
= 6;
231 krnARMImpl
->ARMI_Save_VFP_State
= &cpu_Save_VFP16_State
;
232 krnARMImpl
->ARMI_Restore_VFP_State
= &cpu_Restore_VFP16_State
;
235 krnARMImpl
->ARMI_Init_VFP_State
= &cpu_Init_VFP_State
;
236 krnARMImpl
->ARMI_Delay
= &cpu_Delay
;
239 void cpu_Init(struct ARM_Implementation
*krnARMImpl
, struct TagItem
*msg
)
241 register unsigned int fpuflags
;
247 /* Only boot processor calls cpu_Init with a valid msg */
251 /* Enable Vector Floating Point Calculations */
252 asm volatile("mrc p15,0,%[fpuflags],c1,c0,2\n" : [fpuflags
] "=r" (fpuflags
)); // Read Access Control Register
253 fpuflags
|= (VFPSingle
| VFPDouble
); // Enable Single & Double Precision
254 asm volatile("mcr p15,0,%[fpuflags],c1,c0,2\n" : : [fpuflags
] "r" (fpuflags
)); // Set Access Control Register
256 " mov %[fpuflags],%[vfpenable] \n" // Enable VFP
257 " fmxr fpexc,%[fpuflags] \n"
258 : [fpuflags
] "=r" (fpuflags
) : [vfpenable
] "I" (VFPEnable
));
261 void cpu_Switch(regs_t
*regs
)
265 D(bug("[Kernel] cpu_Switch()\n"));
267 task
= GET_THIS_TASK
;
269 /* Copy current task's context into the ETask structure */
270 /* Restore the task's state */
271 STORE_TASKSTATE(task
, regs
)
273 if (__arm_arosintern
.ARMI_GetTime
)
275 /* Update the taks CPU time .. */
276 GetIntETask(task
)->iet_CpuTime
+= __arm_arosintern
.ARMI_GetTime() - GetIntETask(task
)->iet_private1
;
282 void cpu_Dispatch(regs_t
*regs
)
286 D(bug("[Kernel] cpu_Dispatch()\n"));
288 /* Break Disable() if needed */
289 if (SysBase
->IDNestCnt
>= 0) {
290 SysBase
->IDNestCnt
= -1;
291 ((uint32_t *)regs
)[13] &= ~0x80;
294 if (!(task
= core_Dispatch()))
297 D(bug("[Kernel] cpu_Dispatch: Letting '%s' run for a bit..\n", task
->tc_Node
.ln_Name
));
299 /* Restore the task's state */
300 RESTORE_TASKSTATE(task
, regs
)
302 DREGS(cpu_DumpRegs(regs
));
304 /* Handle tasks's flags */
305 if (task
->tc_Flags
& TF_EXCEPT
)
308 if (__arm_arosintern
.ARMI_GetTime
)
310 /* Store the launch time */
311 GetIntETask(task
)->iet_private1
= __arm_arosintern
.ARMI_GetTime();
314 if (task
->tc_Flags
& TF_LAUNCH
)
316 AROS_UFC1(void, task
->tc_Launch
,
317 AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
321 void cpu_DumpRegs(regs_t
*regs
)
325 bug("[KRN] Register Dump:\n");
326 for (i
= 0; i
< 12; i
++)
328 bug("[KRN] r%02d: 0x%08x\n", i
, ((uint32_t *)regs
)[i
]);
330 bug("[KRN] (ip) r12: 0x%08x\n", ((uint32_t *)regs
)[12]);
331 bug("[KRN] (sp) r13: 0x%08x\n", ((uint32_t *)regs
)[13]);
332 bug("[KRN] (lr) r14: 0x%08x\n", ((uint32_t *)regs
)[14]);
333 bug("[KRN] (pc) r15: 0x%08x\n", ((uint32_t *)regs
)[15]);
334 bug("[KRN] cpsr: 0x%08x\n", ((uint32_t *)regs
)[16]);