/* Copyright © 2013-2015, The AROS Development Team. All rights reserved. */
6 #include <exec_platform.h>
8 #include <aros/kernel.h>
9 #include <aros/libcall.h>
10 #include <exec/execbase.h>
11 #include <hardware/intbits.h>
12 #include <aros/arm/cpucontext.h>
13 #include <asm/arm/cpu.h>
16 #include "kernel_base.h"
18 #include <proto/kernel.h>
22 #include "kernel_intern.h"
23 #include "kernel_debug.h"
24 #include "kernel_cpu.h"
25 #include <kernel_objects.h>
26 #include "kernel_syscall.h"
27 #include "kernel_scheduler.h"
28 #include "kernel_intr.h"
/* Idle task set up by the boot CPU; referenced by the per-core bootstrap code. */
extern struct Task *sysIdleTask;

/* Bitmask of cores that have completed bootstrap; bit 0 (the boot CPU) is set
 * statically. NOTE(review): presumably placed in .data explicitly so the mask
 * holds its initial value independent of .bss clearing during early boot —
 * confirm against the linker script / startup code. */
uint32_t __arm_affinitymask __attribute__((section(".data"))) = 1;

/* exec.library internal: allocates and initializes the extended-task (ETask)
 * data for a Task; returns FALSE on allocation failure. */
extern BOOL Exec_InitETask(struct Task *, struct ExecBase *);
/* Secondary-core entry trampoline (fragment of an asm() block whose opening
 * statement and closing lines are outside this view). Each waking core, per
 * the CP15 accesses below:
 *   - loads the page-directory pointer (mpcore_pde) into TTBR0 and TTBR1 (c2),
 *   - programs the domain access control register (c3),
 *   - reads SCTLR (c1), ORs in 0x800000 after a DSB (c7,c10,4), writes it
 *     back, then issues a prefetch flush (c7,c5,4),
 *     NOTE(review): the meaning of the 0x800000 SCTLR bit and the exact
 *     enable sequence depend on lines not visible here — confirm against the
 *     full asm block,
 *   - loads a first-stage stack (mpcore_fstack), then the final stack
 *     (mpcore_stack) — mode-switch instructions between the two loads are
 *     outside this view,
 *   - installs the TLS pointer into TPIDRPRW (c13,c0,3),
 *   - and jumps to the C entry point patched into mpcore_code.
 * The mpcore_* literal words below are patched by the boot CPU before the
 * secondary cores are released. */
"           .globl mpcore_trampoline            \n"
"           .type mpcore_trampoline,%function   \n"
"mpcore_trampoline:                             \n"
/* Install the translation table base for this core */
"           ldr r3, mpcore_pde                  \n"
"           mcr p15, 0, r3, c2, c0, 0           \n"
"           mcr p15, 0, r3, c2, c0, 2           \n"
/* Domain access control */
"           mcr p15, 0, r3, c3, c0, 0           \n"
/* Read-modify-write SCTLR with a barrier before the write */
"           mrc p15, 0, r4, c1, c0, 0           \n"
"           mcr p15, 0, r3, c7, c10, 4          \n"
"           orr r4, r4, #0x800000               \n"
"           mcr p15, 0, r4, c1, c0, 0           \n"
"           mcr p15, 0, r3, c7, c5, 4           \n"
/* Switch stacks, install TLS, jump to the patched C entry point */
"           ldr sp, mpcore_fstack               \n"
"           ldr sp, mpcore_stack                \n"
"           ldr r3, mpcore_tls                  \n"
"           mcr p15, 0, r3, c13, c0, 3          \n"
"           ldr pc, mpcore_code                 \n"
/* Patch slots written by the boot CPU before releasing secondary cores */
"           .globl mpcore_pde                   \n"
"mpcore_pde:     .word 0                        \n"
"mpcore_code:    .word 0                        \n"
"mpcore_stack:   .word 0                        \n"
"mpcore_tls:     .word 0                        \n"
"mpcore_fstack:  .word 0                        \n"
"           .globl mpcore_end                   \n"
/* Body of the per-core C bootstrap/registration routine reached from
 * mpcore_trampoline (the function header, its opening brace, and the
 * declarations of tmp/ttmp/__tls/t/ml — plus several if-conditions and
 * braces — lie outside this view).  Each secondary core: enables its
 * caches, reads its identity, picks up KernelBase/SysBase from TLS,
 * builds a per-core bootstrap Task, marks itself in __arm_affinitymask,
 * and parks in WFI waiting for interrupts. */
#if defined(__AROSEXEC_SMP__)
    struct ExecBase *SysBase;       /* shadows the globals: per-core values come from TLS below */
    struct KernelBase *KernelBase;
    struct ExceptionContext *ctx;   /* CPU context attached to the bootstrap task */

    /* Enable I/D caches and branch prediction, allow unaligned access (SCTLR) */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(tmp));
    tmp |= (1 << 2) | (1 << 12) | (1 << 11); /* I and D caches, branch prediction */
    tmp = (tmp & ~2) | (1 << 22); /* Unaligned access enable */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r"(tmp));

    /* Per-core CPU init (VFP etc.); NULL msg = not the boot processor */
    cpu_Init(&__arm_arosintern, NULL);

    /* MPIDR: low bits identify this core ((tmp & 0x3) below) */
    asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp));

#if defined(__AROSEXEC_SMP__)
    /* TLS pointer was placed in TPIDRPRW by the trampoline */
    asm volatile (" mrc p15, 0, %0, c13, c0, 3 " : "=r" (__tls));

    /* Now we are ready to bootstrap and launch the scheduler */
    bug("[KRN] Core %d Boostrapping..\n", (tmp & 0x3));

    /* Log CPSR before/after the msr (code between the two reads, if any,
     * is outside this view) */
    asm volatile ("mrs %0, cpsr" :"=r"(ttmp));
    bug("[KRN] Core %d CPSR=%08x\n", (tmp & 0x3), ttmp);
    asm volatile ("msr cpsr_cxsf, %0" ::"r"(ttmp));
    bug("[KRN] Core %d CPSR=%08x\n", (tmp & 0x3), ttmp);
    bug("[KRN] Core %d TLS @ 0x%p\n", (tmp & 0x3), (__tls));

    /* Pick up the library bases this core should use from its TLS block */
    KernelBase = __tls->KernelBase; // TLS_GET(KernelBase)
    SysBase = __tls->SysBase; // TLS_GET(SysBase)

    bug("[KRN] Core %d KernelBase @ 0x%p\n", (tmp & 0x3), KernelBase);
    bug("[KRN] Core %d SysBase @ 0x%p\n", (tmp & 0x3), SysBase);

    /* Allocate the bootstrap Task and its MemList (the failure check
     * guarding the FATAL path below is outside this view) */
    t = AllocMem(sizeof(struct Task), MEMF_PUBLIC|MEMF_CLEAR);
    ml = AllocMem(sizeof(struct MemList), MEMF_PUBLIC|MEMF_CLEAR);

    bug("[KRN] Core %d FATAL : Failed to allocate memory for bootstrap task!", (tmp & 0x3));
    goto cpu_registerfatal;

    bug("[KRN] Core %d Bootstrap task @ 0x%p\n", (tmp & 0x3), t);
    bug("[KRN] Core %d cpu context size %d\n", (tmp & 0x3), KernelBase->kb_ContextSize);

    /* Create the CPU context the scheduler will save/restore for this task
     * (the NULL check guarding the FATAL path is outside this view) */
    ctx = KrnCreateContext();

    bug("[KRN] Core %d FATAL : Failed to create the boostrap task context!\n", (tmp & 0x3));
    goto cpu_registerfatal;

    bug("[KRN] Core %d cpu ctx @ 0x%p\n", (tmp & 0x3), ctx);

    /* Initialize the bootstrap task: name it after this core, mark it
     * running at priority 0 with the standard signals pre-allocated */
    NEWLIST(&t->tc_MemEntry);
    t->tc_Node.ln_Name = AllocVec(20, MEMF_CLEAR);
    sprintf( t->tc_Node.ln_Name, "Core(%d) Bootstrap", (tmp & 0x3));
    t->tc_Node.ln_Type = NT_TASK;
    t->tc_Node.ln_Pri = 0;
    t->tc_State = TS_RUN;
    t->tc_SigAlloc = 0xFFFF;

    /* Build bootstrap's memory list so the Task struct itself is freed
     * with the task */
    ml->ml_NumEntries = 1;
    ml->ml_ME[0].me_Addr = t;
    ml->ml_ME[0].me_Length = sizeof(struct Task);
    AddHead(&t->tc_MemEntry, &ml->ml_Node);

    /* Create an ETask structure and attach the CPU context */
    if (!Exec_InitETask(t, SysBase))
    bug("[KRN] Core %d FATAL : Failed to allocate memory for boostrap extended data!\n", (tmp & 0x3));
    goto cpu_registerfatal;

    t->tc_UnionETask.tc_ETask->et_RegFrame = ctx;

    /* This Bootstrap task can run only on one of the available cores */
    IntETask(t->tc_UnionETask.tc_ETask)->iet_CpuNumber = (tmp & 0x3);
    IntETask(t->tc_UnionETask.tc_ETask)->iet_CpuAffinity = 1 << (tmp & 0x3);

    /* Optional platform hook for extra per-core setup */
    if (__arm_arosintern.ARMI_InitCore)
        __arm_arosintern.ARMI_InitCore(KernelBase, SysBase);

    bug("[KRN] Core %d operational\n", (tmp & 0x3));

    /* Publish this core in the affinity mask.
     * NOTE(review): the spinlock around the update is commented out — the
     * |= below is a non-atomic read-modify-write on a shared word; confirm
     * the boot protocol serializes core bring-up, otherwise updates can be
     * lost. */
    // amlock = KrnSpinLock(amlock, 0);
    __arm_affinitymask |= (1 << (tmp & 0x3));
    // KrnSpinUnLock(amlock);

    bug("[KRN] Core %d waiting for interrupts\n", (tmp & 0x3));

    /* Park until an interrupt (scheduler) takes over */
    for (;;) asm volatile("wfi");
/* Crude busy-wait delay: one no-op instruction iteration per requested
 * "microsecond".  NOTE(review): nothing here calibrates the loop to the CPU
 * clock, so usecs iterations only approximate microseconds — confirm callers
 * treat this as a rough delay.  (The braces and the declaration of `delay`
 * are outside this view.) */
void cpu_Delay(int usecs)
    for (delay = 0; delay < usecs; delay++) asm volatile ("mov r0, r0\n");
/* Prototypes for the VFP context save/restore helpers implemented in the
 * asm block below.  The 16-register variants cover d0-d15; the 32-register
 * variants cover d0-d31.  `buffer` points at the per-task VFP save area. */
void cpu_Save_VFP16_State(void *buffer);
void cpu_Save_VFP32_State(void *buffer);
void cpu_Restore_VFP16_State(void *buffer);
void cpu_Restore_VFP32_State(void *buffer);
/* VFP save/restore implementations (fragment of an asm() block whose opening
 * statement, .globl directives and `bx lr` returns are outside this view).
 * Layout of the save area: d0..d15/d31 at offset 0, one status word at byte
 * offset 256 (NOTE(review): the instruction producing/consuming r3 —
 * presumably fmrx/fmxr on FPSCR — is not visible here; confirm).
 * The .word encodings are used because the d16-d31 forms require assembler
 * support the build may lack; the hex values are documented inline. */
"cpu_Save_VFP16_State:                  \n"
"           str r3, [r0, #256]          \n"
"           vstmia r0, {d0-d15}         \n"

"cpu_Save_VFP32_State:                  \n"
"           str r3, [r0, #256]          \n"
"           .word 0xec800b40            \n" // vstmia r0, {d0-d31}

"cpu_Restore_VFP16_State:               \n"
"           ldr r3, [r0, #256]          \n"
"           vldmia r0, {d0-d15}         \n"

"cpu_Restore_VFP32_State:               \n"
"           ldr r3, [r0, #256]          \n"
"           .word 0xec900b20            \n" // vldmia r0, {d0-d31}
/* Zero a task's VFP save area so a fresh task starts with clean FP state.
 * (The function braces are outside this view.) */
void cpu_Init_VFP_State(void *buffer)
    bzero(buffer, sizeof(struct VFPContext));
/* Probe the CPU implementation and fill in the ARM_Implementation function
 * table: family, VFP save/restore handlers, VFP-state initializer and delay
 * routine.  (Braces, the declaration of `tmp`, the matching #endif and the
 * else-branch structure are outside this view.) */
void cpu_Probe(struct ARM_Implementation *krnARMImpl)
    /* MIDR: main ID register identifies the part */
    asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (tmp));
    /* Primary part number 0xC07 — Cortex-A7 class part */
    if ((tmp & 0xfff0) == 0xc070)
        krnARMImpl->ARMI_Family = 7;

        krnARMImpl->ARMI_Save_VFP_State = &cpu_Save_VFP16_State;
        krnARMImpl->ARMI_Restore_VFP_State = &cpu_Restore_VFP16_State;

#if defined(__AROSEXEC_SMP__)
        // Read the Multiprocessor Affinity Register (MPIDR)
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (tmp));

    /* Fallback branch (its `else` and surrounding lines are outside this
     * view): older family, same 16-register VFP handlers */
        krnARMImpl->ARMI_Family = 6;
        krnARMImpl->ARMI_Save_VFP_State = &cpu_Save_VFP16_State;
        krnARMImpl->ARMI_Restore_VFP_State = &cpu_Restore_VFP16_State;

    /* Common handlers regardless of family */
    krnARMImpl->ARMI_Init_VFP_State = &cpu_Init_VFP_State;
    krnARMImpl->ARMI_Delay = &cpu_Delay;
/* Per-CPU initialization: enables VFP access and turns the FPU on.
 * Called once per core; only the boot processor passes a valid `msg`
 * TagItem list.  (Braces and the opening `asm volatile(` of the final
 * statement are outside this view.) */
void cpu_Init(struct ARM_Implementation *krnARMImpl, struct TagItem *msg)
    register unsigned int fpuflags;

    /* Only boot processor calls cpu_Init with a valid msg */

    /* Enable Vector Floating Point Calculations */
    asm volatile("mrc p15,0,%[fpuflags],c1,c0,2\n" : [fpuflags] "=r" (fpuflags));   // Read Access Control Register
    fpuflags |= (VFPSingle | VFPDouble);                                            // Enable Single & Double Precision
    asm volatile("mcr p15,0,%[fpuflags],c1,c0,2\n" : : [fpuflags] "r" (fpuflags));  // Set Access Control Register
    /* Set the FPEXC enable bit to switch the VFP unit on */
    "           mov %[fpuflags],%[vfpenable]    \n" // Enable VFP
    "           fmxr fpexc,%[fpuflags]          \n"
    : [fpuflags] "=r" (fpuflags) : [vfpenable] "I" (VFPEnable));
/* Add timeval `src` into `dest`, then normalize tv_micro into [0, 999999]
 * by carrying whole seconds.  (The while-loop body braces / continuation
 * lines are outside this view.)  Multi-evaluation macro: both arguments are
 * evaluated more than once — pass only side-effect-free expressions. */
#define ADDTIME(dest, src) \
    (dest)->tv_micro += (src)->tv_micro; \
    (dest)->tv_secs += (src)->tv_secs; \
    while((dest)->tv_micro > 999999) \
        (dest)->tv_micro -= 1000000; \
/* Context-switch-out handler: saves the current task's register state into
 * its ETask and charges the elapsed run time to its CPU-time accounting.
 * (Braces and the declarations of `task` and `timeCur` are outside this
 * view.) */
void cpu_Switch(regs_t *regs)
    struct timeval timeVal;

    D(bug("[Kernel] cpu_Switch()\n"));

    task = GET_THIS_TASK;

    /* Copy current task's context into the ETask structure */
    STORE_TASKSTATE(task, regs)

    if (__arm_arosintern.ARMI_GetTime)
        /* Charge elapsed run time to the task: iet_private1 holds the
         * timestamp recorded at dispatch (see cpu_Dispatch) */
        timeCur = __arm_arosintern.ARMI_GetTime() - GetIntETask(task)->iet_private1;
        timeVal.tv_secs = timeCur / 1000000;
        timeVal.tv_micro = timeCur % 1000000;

        ADDTIME(&GetIntETask(task)->iet_CpuTime, &timeVal);
/* Dispatch handler: breaks any pending Disable(), picks the next task from
 * the scheduler, restores its register state and handles its TF_EXCEPT /
 * TF_LAUNCH flags.  (Braces, the declaration of `task`, and the bodies of
 * the core_Dispatch-failure and TF_EXCEPT branches are outside this view.) */
void cpu_Dispatch(regs_t *regs)
    D(bug("[Kernel] cpu_Dispatch()\n"));

    /* Break Disable() if needed */
    if (SysBase->IDNestCnt >= 0) {
        SysBase->IDNestCnt = -1;
        /* Clear bit 7 (the CPSR I bit value) in word 13 of the frame.
         * NOTE(review): cpu_DumpRegs labels index 13 as sp while index 16
         * is cpsr — confirm which frame layout applies here. */
        ((uint32_t *)regs)[13] &= ~0x80;

    /* Get the next task to run (idle handling on failure is outside this view) */
    if (!(task = core_Dispatch()))

    D(bug("[Kernel] cpu_Dispatch: Letting '%s' run for a bit..\n", task->tc_Node.ln_Name));

    /* Restore the task's state */
    RESTORE_TASKSTATE(task, regs)

    DREGS(cpu_DumpRegs(regs));

    /* Handle task's flags */
    if (task->tc_Flags & TF_EXCEPT)

    if (__arm_arosintern.ARMI_GetTime)
        /* Store the launch time (read back in cpu_Switch for accounting) */
        GetIntETask(task)->iet_private1 = __arm_arosintern.ARMI_GetTime();
        /* First dispatch ever: record the task's start time */
        if (!GetIntETask(task)->iet_StartTime.tv_secs && !GetIntETask(task)->iet_StartTime.tv_micro)
            GetIntETask(task)->iet_StartTime.tv_secs = GetIntETask(task)->iet_private1 / 1000000;
            GetIntETask(task)->iet_StartTime.tv_micro = GetIntETask(task)->iet_private1 % 1000000;

    if (task->tc_Flags & TF_LAUNCH)
        /* Call the task's launch hook with SysBase in A6, per exec convention */
        AROS_UFC1(void, task->tc_Launch,
                  AROS_UFCA(struct ExecBase *, SysBase, A6));
/* Debug helper: prints r0-r15 and CPSR from the exception register frame,
 * treating it as an array of 17 32-bit words (r0..r12, sp, lr, pc, cpsr).
 * (Braces and the declaration of `i` are outside this view.) */
void cpu_DumpRegs(regs_t *regs)
    bug("[KRN] Register Dump:\n");
    /* General-purpose registers r0-r11 */
    for (i = 0; i < 12; i++)
        bug("[KRN]      r%02d: 0x%08x\n", i, ((uint32_t *)regs)[i]);
    bug("[KRN] (ip) r12: 0x%08x\n", ((uint32_t *)regs)[12]);
    bug("[KRN] (sp) r13: 0x%08x\n", ((uint32_t *)regs)[13]);
    bug("[KRN] (lr) r14: 0x%08x\n", ((uint32_t *)regs)[14]);
    bug("[KRN] (pc) r15: 0x%08x\n", ((uint32_t *)regs)[15]);
    bug("[KRN]     cpsr: 0x%08x\n", ((uint32_t *)regs)[16]);