/*
    Copyright © 2013-2016, The AROS Development Team. All rights reserved.
    $Id$
*/
#include <aros/types/timespec_s.h>
#include <aros/kernel.h>
#include <aros/libcall.h>
#include <exec/execbase.h>
#include <hardware/intbits.h>
#include <aros/arm/cpucontext.h>
#include <strings.h>

#include <aros/types/spinlock_s.h>

#include "kernel_base.h"

#include <proto/kernel.h>

#include "etask.h"

#include "kernel_intern.h"
#include "kernel_debug.h"
#include "kernel_cpu.h"
#include <kernel_objects.h>
#include "kernel_syscall.h"
#include "kernel_scheduler.h"
#include "kernel_intr.h"

#include "tls.h"
#define D(x)
#define DSCHED(x)
#define DREGS(x)
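/*
 * Compile-time debug switches: each macro swallows its argument, so the
 * D()/DSCHED()/DREGS() bug() traces below are no-ops by default. Redefine
 * one as e.g. "#define DSCHED(x) x" to re-enable that class of output.
 */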
#if defined(__AROSEXEC_SMP__)
extern struct Task *cpu_InitBootStrap(struct ExecBase *);
extern void cpu_BootStrap(struct Task *, struct ExecBase *);
#endif
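/*
 * Entry point for the secondary cores. Each core enters mpcore_trampoline
 * below with the MMU off: it drops out of hypervisor mode if necessary,
 * enables the VFP, loads the MMU translation table from mpcore_pde, sets
 * up the FIQ and SVC mode stacks and the TLS pointer (CP15 c13), and
 * finally jumps to the address stored in mpcore_code.
 */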
asm(
"           .globl mpcore_trampoline            \n"
"           .type mpcore_trampoline,%function   \n"
"mpcore_trampoline:                             \n"
"           mrs     r4, cpsr_all                \n" /* Check if in hypervisor mode */
"           and     r4, r4, #0x1f               \n" /* In that case try to leave it */
"           mov     r8, #0x1a                   \n"
"           cmp     r4, r8                      \n"
"           beq     leave_hyper                 \n"
"mpcore_continue_boot:                          \n"
"           cps     #0x13                       \n"
"           mrc     p15,0,r4,c1,c0,2            \n" /* Enable single and double VFP coprocessors. */
"           orr     r4, r4, #0x00f00000         \n" /* This is necessary since gcc might want to use vfp registers, */
"           mcr     p15,0,r4,c1,c0,2            \n" /* either as a cache for general purpose regs or e.g. for division. This is the case with gcc9. */
"           mov     r4,#0x40000000              \n"
"           fmxr    fpexc,r4                    \n" /* Enable VFP now */
#if AROS_BIG_ENDIAN
"           setend  be                          \n" /* If AROS is big endian, set the endianness of the cpu here */
#endif
"           ldr     r3, mpcore_pde              \n" /* MMU table */
"           mcr     p15, 0, r3, c2, c0, 0       \n"
"           mov     r3, #0                      \n"
"           mcr     p15, 0, r3, c2, c0, 2       \n"
"           mov     r3, #1                      \n"
"           mcr     p15, 0, r3, c3, c0, 0       \n"
"           mrc     p15, 0, r4, c1, c0, 0       \n"
"           mov     r3, #0                      \n"
"           mcr     p15, 0, r3, c7, c10, 4      \n"
"           orr     r4, r4, #0x800000           \n" /* v6 page tables */
"           orr     r4, r4, #1                  \n" /* Enable MMU */
#if AROS_BIG_ENDIAN
"           orr     r4, r4, #0x2000000          \n" /* EE bit - BigEndian exceptions and BigEndian page tables */
#endif
"           mcr     p15, 0, r4, c1, c0, 0       \n"
"           mcr     p15, 0, r3, c7, c5, 4       \n"
"           cps     #0x11                       \n"
#if AROS_BIG_ENDIAN
"           setend  be                          \n" /* If AROS is big endian, set the endianness of the cpu here */
#endif
"           ldr     sp, mpcore_fstack           \n"
"           cps     #0x13                       \n"
"           ldr     sp, mpcore_stack            \n"
"           ldr     r3, mpcore_tls              \n"
"           mcr     p15, 0, r3, c13, c0, 3      \n"
"           ldr     pc, mpcore_code             \n"
"leave_hyper:                                   \n" /* Escape hypervisor mode forever */
"           adr     r4, mpcore_continue_boot    \n"
"           .byte   0x04,0xf3,0x2e,0xe1         \n" /* msr ELR_hyp, r4 */
"           mrs     r4, cpsr_all                \n"
"           and     r4, r4, #0x1f               \n"
"           orr     r4, r4, #0x13               \n"
"           .byte   0x04,0xf3,0x6e,0xe1         \n" /* msr SPSR_hyp, r4 */
"           .byte   0x6e,0x00,0x60,0xe1         \n" /* eret */ /* Exit hypervisor */
"           .globl mpcore_pde                   \n"
"mpcore_pde:    .word 0                         \n"
"mpcore_code:   .word 0                         \n"
"mpcore_stack:  .word 0                         \n"
"mpcore_tls:    .word 0                         \n"
"mpcore_fstack: .word 0                         \n"
"           .globl mpcore_end                   \n"
"mpcore_end: "
);
spinlock_t startup_lock;
void cpu_Register()
{
    uint32_t tmp;
#if defined(__AROSEXEC_SMP__)
    tls_t *__tls;
    struct ExecBase *SysBase;
#endif
    struct KernelBase *KernelBase;
    cpuid_t cpunum = GetCPUNumber();

    asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(tmp));
    tmp |= (1 << 2) | (1 << 12) | (1 << 11);            // I and D caches, branch prediction
    tmp = (tmp & ~2) | (1 << 22);                       // Unaligned access enable
    asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r"(tmp));
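    /*
     * SCTLR bits touched above: C (bit 2, D-cache), Z (bit 11, branch
     * prediction) and I (bit 12, I-cache) are set; A (bit 1, alignment
     * fault checking) is cleared and U (bit 22) is set, so unaligned
     * accesses are handled in hardware.
     */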
    cpu_Init(&__arm_arosintern, NULL);

#if defined(__AROSEXEC_SMP__)
    __tls = TLS_PTR_GET();

    /* Now we are ready to bootstrap and launch the scheduler */
    bug("[Kernel:%02d] Bootstrapping...\n", cpunum);

    asm volatile ("mrs %0, cpsr" : "=r"(tmp));
    bug("[Kernel:%02d] CPSR=%08x\n", cpunum, tmp);
    tmp &= ~(1 << 6);                                   // Clear the F flag - enable FIQs on this core
    asm volatile ("msr cpsr_cxsf, %0" : : "r"(tmp));
    bug("[Kernel:%02d] CPSR=%08x\n", cpunum, tmp);

    bug("[Kernel:%02d] TLS @ 0x%p\n", cpunum, (__tls));
    KernelBase = (struct KernelBase *)__tls->KernelBase;        // TLS_GET(KernelBase)
    SysBase = (struct ExecBase *)__tls->SysBase;                // TLS_GET(SysBase)
    bug("[Kernel:%02d] KernelBase @ 0x%p\n", cpunum, KernelBase);
    bug("[Kernel:%02d] SysBase @ 0x%p\n", cpunum, SysBase);

    if ((__tls->ThisTask = cpu_InitBootStrap(SysBase)) == NULL)
        goto cpu_registerfatal;

    if (__arm_arosintern.ARMI_InitCore)
        __arm_arosintern.ARMI_InitCore(KernelBase, SysBase);

    cpu_BootStrap(__tls->ThisTask, SysBase);
#else
    KernelBase = (struct KernelBase *)TLS_GET(KernelBase);
#endif

    bug("[Kernel:%02d] Operational\n", cpunum);

#if defined(__AROSEXEC_SMP__)
cpu_registerfatal:
#endif
    bug("[Kernel:%02d] Waiting for interrupts\n", cpunum);

    KrnSpinUnLock(&startup_lock);

#if !defined(__AROSEXEC_SMP__)
    do {
#endif
        asm volatile("wfi");
#if !defined(__AROSEXEC_SMP__)
    } while (1);
#else

    /* Switch to user mode, and load the bootstrap task's stack */
    bug("[Kernel:%02d] Dropping into USER mode ... \n", cpunum);

    uint32_t bs_stack = (uint32_t)__tls->ThisTask->tc_SPUpper;
    asm volatile(
        "cps %[mode_user]\n"
#if AROS_BIG_ENDIAN
        "setend be\n"
#endif
        "mov sp, %[bs_stack]\n"
        : : [bs_stack] "r" (bs_stack), [mode_user] "I" (CPUMODE_USER)
    );

    /* We now start up the interrupts */
    Permit();
    Enable();
#endif
}
void cpu_Delay(int usecs)
{
    unsigned int delay;
    for (delay = 0; delay < usecs; delay++) asm volatile ("mov r0, r0\n");
}
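/*
 * Note: this is a raw busy-wait; one loop iteration is not calibrated to
 * one microsecond, so the actual delay scales with the core clock. A
 * platform with a hardware timer can install a more accurate routine in
 * ARMI_Delay instead of this default.
 */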
void cpu_Save_VFP16_State(void *buffer);
void cpu_Save_VFP32_State(void *buffer);
void cpu_Restore_VFP16_State(void *buffer);
void cpu_Restore_VFP32_State(void *buffer);
asm(
"cpu_Save_VFP16_State:              \n"
"           vmrs r3, fpscr          \n"     /* Read FPSCR and store it after the register bank */
"           str r3, [r0, #256]      \n"
"           vstmia r0, {d0-d15}     \n"
"           bx lr                   \n"

"cpu_Save_VFP32_State:              \n"
"           vmrs r3, fpscr          \n"
"           str r3, [r0, #256]      \n"
"           .word 0xec800b40        \n"     // vstmia r0, {d0-d31}
"           bx lr                   \n"

"cpu_Restore_VFP16_State:           \n"
"           ldr r3, [r0, #256]      \n"     /* Reload the saved FPSCR and write it back */
"           vmsr fpscr, r3          \n"
"           vldmia r0, {d0-d15}     \n"
"           bx lr                   \n"

"cpu_Restore_VFP32_State:           \n"
"           ldr r3, [r0, #256]      \n"
"           vmsr fpscr, r3          \n"
"           .word 0xec900b40        \n"     // vldmia r0, {d0-d31}
"           bx lr                   \n"
);
void cpu_Init_VFP_State(void *buffer)
{
    bzero(buffer, sizeof(struct VFPContext));
}
void cpu_Probe(struct ARM_Implementation *krnARMImpl)
{
    uint32_t tmp;

    asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (tmp));
    if ((tmp & 0xfff0) == 0xc070 || (tmp & 0xfff0) == 0xd030)   // MIDR part numbers 0xC07 (Cortex-A7) and 0xD03 (Cortex-A53)
    {
        krnARMImpl->ARMI_Family = 7;

        krnARMImpl->ARMI_Save_VFP_State = &cpu_Save_VFP16_State;
        krnARMImpl->ARMI_Restore_VFP_State = &cpu_Restore_VFP16_State;

#if defined(__AROSEXEC_SMP__)
        // Read the Multiprocessor Affinity Register (MPIDR)
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (tmp));

        if (tmp & (2 << 30))
        {
            // Multicore system
        }
#endif
    }
    else
    {
        krnARMImpl->ARMI_Family = 6;
        krnARMImpl->ARMI_Save_VFP_State = &cpu_Save_VFP16_State;
        krnARMImpl->ARMI_Restore_VFP_State = &cpu_Restore_VFP16_State;
    }

    krnARMImpl->ARMI_Init_VFP_State = &cpu_Init_VFP_State;
    krnARMImpl->ARMI_Delay = &cpu_Delay;
}
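/*
 * Note that both branches currently install the d0-d15 save/restore
 * routines; the cpu_Save_VFP32_State/cpu_Restore_VFP32_State variants
 * above remain available for cores with a full 32-register VFP bank.
 */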
void cpu_Init(struct ARM_Implementation *krnARMImpl, struct TagItem *msg)
{
    register unsigned int fpuflags;
    cpuid_t cpunum = GetCPUNumber();

    core_SetupMMU(msg);

    __arm_arosintern.ARMI_AffinityMask |= (1 << cpunum);

    /* Enable Vector Floating Point Calculations */
    asm volatile("mrc p15,0,%[fpuflags],c1,c0,2\n" : [fpuflags] "=r" (fpuflags));   // Read Access Control Register
    fpuflags |= (VFPSingle | VFPDouble);                                            // Enable Single & Double Precision
    asm volatile("mcr p15,0,%[fpuflags],c1,c0,2\n" : : [fpuflags] "r" (fpuflags));  // Set Access Control Register
    asm volatile(
        "       mov %[fpuflags],%[vfpenable]    \n"                                 // Enable VFP
        "       fmxr fpexc,%[fpuflags]          \n"
        : [fpuflags] "=r" (fpuflags) : [vfpenable] "I" (VFPEnable));
}
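/*
 * A quick map of the bits involved (assuming the usual definitions of
 * VFPSingle/VFPDouble/VFPEnable in kernel_cpu.h): the CPACR (CP15 c1,c0,2)
 * fields cp10 (bits 21:20) and cp11 (bits 23:22) grant access to the VFP
 * coprocessors, and FPEXC.EN (bit 30, the same 0x40000000 the trampoline
 * writes via fmxr) actually turns the unit on.
 */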
void cpu_Switch(regs_t *regs)
{
    struct Task *task;
    UQUAD timeCur;
    struct timespec timeSpec;
    DSCHED(
        cpuid_t cpunum = GetCPUNumber();
        bug("[Kernel:%02d] cpu_Switch()\n", cpunum);
    )

    task = GET_THIS_TASK;

    /* Cache the running task's context */
    STORE_TASKSTATE(task, regs)

    if (__arm_arosintern.ARMI_GetTime)
    {
        /* Update the task's CPU time */
        timeCur = __arm_arosintern.ARMI_GetTime() - IntETask(task->tc_UnionETask.tc_ETask)->iet_private1;
        timeSpec.tv_sec = timeCur / 1000000000;
        timeSpec.tv_nsec = timeCur % 1000000000;

        IntETask(task->tc_UnionETask.tc_ETask)->iet_CpuTime.tv_nsec += timeSpec.tv_nsec;
        IntETask(task->tc_UnionETask.tc_ETask)->iet_CpuTime.tv_sec += timeSpec.tv_sec;
        /* Normalize tv_nsec back into the [0, 1s) range */
        while (IntETask(task->tc_UnionETask.tc_ETask)->iet_CpuTime.tv_nsec >= 1000000000)
        {
            IntETask(task->tc_UnionETask.tc_ETask)->iet_CpuTime.tv_nsec -= 1000000000;
            IntETask(task->tc_UnionETask.tc_ETask)->iet_CpuTime.tv_sec++;
        }
    }

    core_Switch();
}
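/*
 * cpu_Switch() only saves the outgoing task's context and accounts its
 * CPU time before handing over to core_Switch(); selecting and resuming
 * the next task happens in cpu_Dispatch() below via core_Dispatch().
 */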
void cpu_Dispatch(regs_t *regs)
{
    struct Task *task;
#if defined(__AROSEXEC_SMP__)
    cpuid_t cpunum = GetCPUNumber();
    DSCHED(
        bug("[Kernel:%02d] cpu_Dispatch()\n", cpunum);
    )
#else
    DSCHED(
        cpuid_t cpunum = GetCPUNumber();
        bug("[Kernel:%02d] cpu_Dispatch()\n", cpunum);
    )
#endif

#if 0
    /* Break Disable() if needed */
    if (IDNESTCOUNT_GET >= 0) {
        IDNESTCOUNT_SET(-1);
        ((uint32_t *)regs)[16] &= ~0x80;
    }
#endif

    while (!(task = core_Dispatch()))
    {
        DSCHED(bug("[Kernel:%02d] cpu_Dispatch: Nothing to run - idling\n", cpunum));
        asm volatile("wfi");
    }

    DSCHED(bug("[Kernel:%02d] cpu_Dispatch: 0x%p [R ] '%s'\n", cpunum, task, task->tc_Node.ln_Name));

    /* Restore the task's state */
    RESTORE_TASKSTATE(task, regs)

    DREGS(cpu_DumpRegs(regs));

    /* Handle the task's flags */
    if (task->tc_Flags & TF_EXCEPT)
        Exception();

#if defined(__AROSEXEC_SMP__)
    IntETask(task->tc_UnionETask.tc_ETask)->iet_CpuNumber = cpunum;
#endif

    if (__arm_arosintern.ARMI_GetTime)
    {
        /* Store the launch time */
        IntETask(task->tc_UnionETask.tc_ETask)->iet_private1 = __arm_arosintern.ARMI_GetTime();
        if (!IntETask(task->tc_UnionETask.tc_ETask)->iet_StartTime.tv_sec && !IntETask(task->tc_UnionETask.tc_ETask)->iet_StartTime.tv_nsec)
        {
            IntETask(task->tc_UnionETask.tc_ETask)->iet_StartTime.tv_sec = IntETask(task->tc_UnionETask.tc_ETask)->iet_private1 / 1000000;
            IntETask(task->tc_UnionETask.tc_ETask)->iet_StartTime.tv_nsec = (IntETask(task->tc_UnionETask.tc_ETask)->iet_private1 % 1000000) * 1000;
        }
    }

    if (task->tc_Flags & TF_LAUNCH)
    {
        AROS_UFC1(void, task->tc_Launch,
            AROS_UFCA(struct ExecBase *, SysBase, A6));
    }

    /* Leave interrupt and jump to the new task */
}
void cpu_DumpRegs(regs_t *regs)
{
    cpuid_t cpunum = GetCPUNumber();
    int i;

    bug("[Kernel:%02d] CPU Register Dump:\n", cpunum);
    for (i = 0; i < 12; i++)
    {
        bug("[Kernel:%02d]      r%02d: 0x%08x\n", cpunum, i, ((uint32_t *)regs)[i]);
    }
    bug("[Kernel:%02d] (ip) r12: 0x%08x\n", cpunum, ((uint32_t *)regs)[12]);
    bug("[Kernel:%02d] (sp) r13: 0x%08x\n", cpunum, ((uint32_t *)regs)[13]);
    bug("[Kernel:%02d] (lr) r14: 0x%08x\n", cpunum, ((uint32_t *)regs)[14]);
    bug("[Kernel:%02d] (pc) r15: 0x%08x\n", cpunum, ((uint32_t *)regs)[15]);
    bug("[Kernel:%02d]     cpsr: 0x%08x\n", cpunum, ((uint32_t *)regs)[16]);
}