/*
    Copyright © 2013-2015, The AROS Development Team. All rights reserved.
*/
6 #include <aros/kernel.h>
7 #include <aros/libcall.h>
8 #include <exec/execbase.h>
9 #include <hardware/intbits.h>
10 #include <aros/arm/cpucontext.h>
11 #include <asm/arm/cpu.h>
14 #include <aros/types/spinlock_s.h>
16 #include "kernel_base.h"
18 #include <proto/kernel.h>
22 #include "kernel_intern.h"
23 #include "kernel_debug.h"
24 #include "kernel_cpu.h"
25 #include <kernel_objects.h>
26 #include "kernel_syscall.h"
27 #include "kernel_scheduler.h"
28 #include "kernel_intr.h"
/* Bitmask of cores that are up and running; bit n corresponds to core n.
   Placed in .data (explicitly initialised) because it is written before BSS
   clearing can be relied upon during early boot. Boot core is bit 0. */
uint32_t __arm_affinitymask __attribute__((section(".data"))) = 1;
/* Protects updates to __arm_affinitymask while secondary cores come online. */
spinlock_t __arm_affinitymasklock;
#if defined(__AROSEXEC_SMP__)
/* SMP bootstrap helpers implemented elsewhere in the kernel:
   cpu_InitBootStrap() builds the per-core bootstrap task,
   cpu_BootStrap() attaches it and starts scheduling on the core. */
extern struct Task *cpu_InitBootStrap(struct ExecBase *);
extern void cpu_BootStrap(struct Task *, struct ExecBase *);
#endif
43 " .globl mpcore_trampoline \n"
44 " .type mpcore_trampoline,%function \n"
45 "mpcore_trampoline: \n"
46 " ldr r3, mpcore_pde \n"
47 " mcr p15, 0, r3, c2, c0, 0 \n"
49 " mcr p15, 0, r3, c2, c0, 2 \n"
51 " mcr p15, 0, r3, c3, c0, 0 \n"
52 " mrc p15, 0, r4, c1, c0, 0 \n"
54 " mcr p15, 0, r3, c7, c10, 4 \n"
55 " orr r4, r4, #0x800000 \n"
57 " mcr p15, 0, r4, c1, c0, 0 \n"
58 " mcr p15, 0, r3, c7, c5, 4 \n"
60 " ldr sp, mpcore_fstack \n"
62 " ldr sp, mpcore_stack \n"
63 " ldr r3, mpcore_tls \n"
64 " mcr p15, 0, r3, c13, c0, 3 \n"
65 " ldr pc, mpcore_code \n"
67 " .globl mpcore_pde \n"
68 "mpcore_pde: .word 0 \n"
69 "mpcore_code: .word 0 \n"
70 "mpcore_stack: .word 0 \n"
71 "mpcore_tls: .word 0 \n"
72 "mpcore_fstack: .word 0 \n"
73 " .globl mpcore_end \n"
/* Serialises secondary-core startup: held while a core boots and released
   by that core once it is parked waiting for interrupts. */
spinlock_t startup_lock;
82 #if defined(__AROSEXEC_SMP__)
84 struct ExecBase
*SysBase
;
85 struct KernelBase
*KernelBase
;
88 asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(tmp
));
89 tmp
|= (1 << 2) | (1 << 12) | (1 << 11); /* I and D caches, branch prediction */
90 tmp
= (tmp
& ~2) | (1 << 22); /* Unaligned access enable */
91 asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r"(tmp
));
93 cpu_Init(&__arm_arosintern
, NULL
);
95 asm volatile (" mrc p15, 0, %0, c0, c0, 5 " : "=r" (tmp
));
97 #if defined(__AROSEXEC_SMP__)
98 __tls
= TLS_PTR_GET();
100 /* Now we are ready to boostrap and launch the schedular */
101 bug("[Kernel] CPU #%02d Boostrapping..\n", (tmp
& 0x3));
103 asm volatile ("mrs %0, cpsr" :"=r"(ttmp
));
104 bug("[Kernel] CPU #%02d CPSR=%08x\n", (tmp
& 0x3), ttmp
);
106 asm volatile ("msr cpsr_cxsf, %0" ::"r"(ttmp
));
107 bug("[Kernel] CPU #%02d CPSR=%08x\n", (tmp
& 0x3), ttmp
);
109 bug("[Kernel] CPU #%02d TLS @ 0x%p\n", (tmp
& 0x3), (__tls
));
110 KernelBase
= (struct KernelBase
*)__tls
->KernelBase
; // TLS_GET(KernelBase)
111 SysBase
= (struct ExecBase
*)__tls
->SysBase
; // TLS_GET(SysBase)
112 bug("[Kernel] CPU #%02d KernelBase @ 0x%p\n", (tmp
& 0x3), KernelBase
);
113 bug("[Kernel] CPU #%02d SysBase @ 0x%p\n", (tmp
& 0x3), SysBase
);
115 if ((__tls
->ThisTask
= cpu_InitBootStrap(SysBase
)) == NULL
)
116 goto cpu_registerfatal
;
118 if (__arm_arosintern
.ARMI_InitCore
)
119 __arm_arosintern
.ARMI_InitCore(KernelBase
, SysBase
);
121 cpu_BootStrap(__tls
->ThisTask
, SysBase
);
124 bug("[Kernel] CPU #%02d operational\n", (tmp
& 0x3));
126 KrnSpinLock(&__arm_affinitymasklock
, NULL
, SPINLOCK_MODE_WRITE
);
127 __arm_affinitymask
|= (1 << (tmp
& 0x3));
128 KrnSpinUnLock(&__arm_affinitymasklock
);
132 bug("[Kernel] CPU #%02d waiting for interrupts\n", (tmp
& 0x3));
134 KrnSpinUnLock(&startup_lock
);
136 for (;;) asm volatile("wfi");
139 void cpu_Delay(int usecs
)
142 for (delay
= 0; delay
< usecs
; delay
++) asm volatile ("mov r0, r0\n");
/* VFP register-file save/restore helpers, implemented in inline asm below.
   'buffer' points to a VFP context area: d0-d15 (or d0-d31 for the 32-reg
   variants) with a status word (FPSCR, presumably — confirm) stored at
   byte offset 256. */
void cpu_Save_VFP16_State(void *buffer);
void cpu_Save_VFP32_State(void *buffer);
void cpu_Restore_VFP16_State(void *buffer);
void cpu_Restore_VFP32_State(void *buffer);
151 "cpu_Save_VFP16_State: \n"
153 " str r3, [r0, #256] \n"
154 " vstmia r0, {d0-d15} \n"
157 "cpu_Save_VFP32_State: \n"
159 " str r3, [r0, #256] \n"
160 " .word 0xec800b40 \n" // vstmia r0, {d0-d31}
163 "cpu_Restore_VFP16_State: \n"
164 " ldr r3, [r0, #256] \n"
166 " vldmia r0, {d0-d15} \n"
169 "cpu_Restore_VFP32_State: \n"
170 " ldr r3, [r0, #256] \n"
172 " .word 0xec900b20 \n" // vldmia r0, {d0-d31}
176 void cpu_Init_VFP_State(void *buffer
)
178 bzero(buffer
, sizeof(struct VFPContext
));
181 void cpu_Probe(struct ARM_Implementation
*krnARMImpl
)
185 __arm_affinitymasklock
= (spinlock_t
)SPINLOCK_INIT_UNLOCKED
;
186 __arm_affinitymask
= 1;
188 asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (tmp
));
189 if ((tmp
& 0xfff0) == 0xc070)
191 krnARMImpl
->ARMI_Family
= 7;
193 krnARMImpl
->ARMI_Save_VFP_State
= &cpu_Save_VFP16_State
;
194 krnARMImpl
->ARMI_Restore_VFP_State
= &cpu_Restore_VFP16_State
;
196 #if defined(__AROSEXEC_SMP__)
197 // Read the Multiprocessor Affinity Register (MPIDR)
198 asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (tmp
));
202 __arm_affinitymask
= 1 << (tmp
& 3);
209 krnARMImpl
->ARMI_Family
= 6;
210 krnARMImpl
->ARMI_Save_VFP_State
= &cpu_Save_VFP16_State
;
211 krnARMImpl
->ARMI_Restore_VFP_State
= &cpu_Restore_VFP16_State
;
214 krnARMImpl
->ARMI_Init_VFP_State
= &cpu_Init_VFP_State
;
215 krnARMImpl
->ARMI_Delay
= &cpu_Delay
;
218 void cpu_Init(struct ARM_Implementation
*krnARMImpl
, struct TagItem
*msg
)
220 register unsigned int fpuflags
;
226 /* Only boot processor calls cpu_Init with a valid msg */
230 /* Enable Vector Floating Point Calculations */
231 asm volatile("mrc p15,0,%[fpuflags],c1,c0,2\n" : [fpuflags
] "=r" (fpuflags
)); // Read Access Control Register
232 fpuflags
|= (VFPSingle
| VFPDouble
); // Enable Single & Double Precision
233 asm volatile("mcr p15,0,%[fpuflags],c1,c0,2\n" : : [fpuflags
] "r" (fpuflags
)); // Set Access Control Register
235 " mov %[fpuflags],%[vfpenable] \n" // Enable VFP
236 " fmxr fpexc,%[fpuflags] \n"
237 : [fpuflags
] "=r" (fpuflags
) : [vfpenable
] "I" (VFPEnable
));
/*
 * Add *src into *dest, where both are timeval-style {tv_secs, tv_micro}
 * pairs, normalising tv_micro back into [0, 999999]. Wrapped in
 * do { } while (0) so it expands to a single statement (safe after an
 * un-braced if). NOTE(review): the loop body's braces and tv_secs++ line
 * were lost in extraction and are reconstructed.
 */
#define ADDTIME(dest, src)                          \
    do {                                            \
        (dest)->tv_micro += (src)->tv_micro;        \
        (dest)->tv_secs  += (src)->tv_secs;         \
        while ((dest)->tv_micro > 999999)           \
        {                                           \
            (dest)->tv_secs++;                      \
            (dest)->tv_micro -= 1000000;            \
        }                                           \
    } while (0)
249 void cpu_Switch(regs_t
*regs
)
253 struct timeval timeVal
;
254 #if defined(__AROSEXEC_SMP__) || defined(DEBUG)
255 int cpunum
= GetCPUNumber();
258 DSCHED(bug("[Kernel] cpu_Switch(%02d)\n", cpunum
));
260 task
= GET_THIS_TASK
;
262 /* Cache running task's context */
263 STORE_TASKSTATE(task
, regs
)
265 if (__arm_arosintern
.ARMI_GetTime
)
267 /* Update the taks CPU time .. */
268 timeCur
= __arm_arosintern
.ARMI_GetTime() - GetIntETask(task
)->iet_private1
;
269 timeVal
.tv_secs
= timeCur
/ 1000000;
270 timeVal
.tv_micro
= timeCur
% 1000000;
272 ADDTIME(&GetIntETask(task
)->iet_CpuTime
, &timeVal
);
278 void cpu_Dispatch(regs_t
*regs
)
280 #if defined(__AROSEXEC_SMP__) || defined(DEBUG)
281 int cpunum
= GetCPUNumber();
286 DSCHED(bug("[Kernel] cpu_Dispatch(%02d)\n", cpunum
));
288 /* Break Disable() if needed */
289 if (SysBase
->IDNestCnt
>= 0) {
290 SysBase
->IDNestCnt
= -1;
291 ((uint32_t *)regs
)[13] &= ~0x80;
294 while (!(task
= core_Dispatch()))
296 DSCHED(bug("[Kernel] cpu_Dispatch[%02d]: Nothing to run - idling\n", cpunum
));
300 DSCHED(bug("[Kernel] cpu_Dispatch[%02d]: 0x%p [R ] '%s'\n", cpunum
, task
, task
->tc_Node
.ln_Name
));
302 /* Restore the task's state */
303 RESTORE_TASKSTATE(task
, regs
)
305 DREGS(cpu_DumpRegs(regs
));
307 /* Handle tasks's flags */
308 if (task
->tc_Flags
& TF_EXCEPT
)
311 #if defined(__AROSEXEC_SMP__)
312 GetIntETask(task
)->iet_CpuNumber
= cpunum
;
315 if (__arm_arosintern
.ARMI_GetTime
)
317 /* Store the launch time */
318 GetIntETask(task
)->iet_private1
= __arm_arosintern
.ARMI_GetTime();
319 if (!GetIntETask(task
)->iet_StartTime
.tv_secs
&& !GetIntETask(task
)->iet_StartTime
.tv_micro
)
321 GetIntETask(task
)->iet_StartTime
.tv_secs
= GetIntETask(task
)->iet_private1
/ 1000000;
322 GetIntETask(task
)->iet_StartTime
.tv_micro
= GetIntETask(task
)->iet_private1
% 1000000;
326 if (task
->tc_Flags
& TF_LAUNCH
)
328 AROS_UFC1(void, task
->tc_Launch
,
329 AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
331 /* Leave interrupt and jump to the new task */
334 void cpu_DumpRegs(regs_t
*regs
)
336 int cpunum
= GetCPUNumber();
339 bug("[Kernel][%02d] Register Dump:\n", cpunum
);
340 for (i
= 0; i
< 12; i
++)
342 bug("[Kernel][%02d] r%02d: 0x%08x\n", cpunum
, i
, ((uint32_t *)regs
)[i
]);
344 bug("[Kernel][%02d] (ip) r12: 0x%08x\n", cpunum
, ((uint32_t *)regs
)[12]);
345 bug("[Kernel][%02d] (sp) r13: 0x%08x\n", cpunum
, ((uint32_t *)regs
)[13]);
346 bug("[Kernel][%02d] (lr) r14: 0x%08x\n", cpunum
, ((uint32_t *)regs
)[14]);
347 bug("[Kernel][%02d] (pc) r15: 0x%08x\n", cpunum
, ((uint32_t *)regs
)[15]);
348 bug("[Kernel][%02d] cpsr: 0x%08x\n", cpunum
, ((uint32_t *)regs
)[16]);