remove unnecessary lock (ARMI_AffinityMask is updated while startup_lock is held)
arch/arm-native/kernel/kernel_cpu.c
/*
    Copyright © 2013-2015, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <aros/kernel.h>
#include <aros/libcall.h>
#include <exec/execbase.h>
#include <hardware/intbits.h>
#include <aros/arm/cpucontext.h>
#include <strings.h>

#include <aros/types/spinlock_s.h>

#include "kernel_base.h"

#include <proto/kernel.h>

#include "etask.h"

#include "kernel_intern.h"
#include "kernel_debug.h"
#include "kernel_cpu.h"
#include <kernel_objects.h>
#include "kernel_syscall.h"
#include "kernel_scheduler.h"
#include "kernel_intr.h"

#define D(x)
#define DSCHED(x)
#define DREGS(x)

#if defined(__AROSEXEC_SMP__)
extern struct Task *cpu_InitBootStrap(struct ExecBase *);
extern void cpu_BootStrap(struct Task *, struct ExecBase *);
#endif
asm(
"           .globl mpcore_trampoline                \n"
"           .type mpcore_trampoline,%function       \n"
"mpcore_trampoline:                                 \n"
"           ldr r3, mpcore_pde                      \n" // page directory base, patched in by the boot CPU
"           mcr p15, 0, r3, c2, c0, 0               \n" // write TTBR0
"           mov r3, #0                              \n"
"           mcr p15, 0, r3, c2, c0, 2               \n" // TTBCR = 0: use TTBR0 for all translations
"           mov r3, #1                              \n"
"           mcr p15, 0, r3, c3, c0, 0               \n" // DACR: domain 0 in client mode
"           mrc p15, 0, r4, c1, c0, 0               \n" // read SCTLR
"           mov r3, #0                              \n"
"           mcr p15, 0, r3, c7, c10, 4              \n" // DSB (CP15 legacy encoding)
"           orr r4, r4, #0x800000                   \n" // SCTLR bit 23 (XP, ARMv6 extended page tables)
"           orr r4, r4, #1                          \n" // SCTLR.M: enable the MMU
"           mcr p15, 0, r4, c1, c0, 0               \n" // write SCTLR back
"           mcr p15, 0, r3, c7, c5, 4               \n" // ISB (CP15 legacy encoding)
"           cps #0x11                               \n" // FIQ mode: set up its stack
"           ldr sp, mpcore_fstack                   \n"
"           cps #0x13                               \n" // back to SVC mode
"           ldr sp, mpcore_stack                    \n"
"           ldr r3, mpcore_tls                      \n"
"           mcr p15, 0, r3, c13, c0, 3              \n" // TLS pointer -> thread ID register (TPIDRURO)
"           ldr pc, mpcore_code                     \n" // jump to the kernel entry point
"                                                   \n"
"           .globl mpcore_pde                       \n"
"mpcore_pde:    .word 0                             \n"
"mpcore_code:   .word 0                             \n"
"mpcore_stack:  .word 0                             \n"
"mpcore_tls:    .word 0                             \n"
"mpcore_fstack: .word 0                             \n"
"           .globl mpcore_end                       \n"
"mpcore_end:                                        \n"
);
spinlock_t startup_lock;

void cpu_Register()
{
    uint32_t tmp;
#if defined(__AROSEXEC_SMP__)
    tls_t *__tls;
    struct ExecBase *SysBase;
    struct KernelBase *KernelBase;
#endif
    cpuid_t cpunum = GetCPUNumber();

    asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(tmp));
    tmp |= (1 << 2) | (1 << 12) | (1 << 11);    // enable D cache (C), I cache (I) and branch prediction (Z)
    tmp = (tmp & ~2) | (1 << 22);               // clear A (alignment faults), set U (unaligned access)
    asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r"(tmp));
    cpu_Init(&__arm_arosintern, NULL);

#if defined(__AROSEXEC_SMP__)
    __tls = TLS_PTR_GET();

    /* Now we are ready to bootstrap and launch the scheduler */
    bug("[Kernel:%02d] Bootstrapping..\n", cpunum);

    asm volatile ("mrs %0, cpsr" : "=r"(tmp));
    bug("[Kernel:%02d] CPSR=%08x\n", cpunum, tmp);
    tmp &= ~(1 << 6);                           // clear F: enable FIQs on this core
    asm volatile ("msr cpsr_cxsf, %0" : : "r"(tmp));
    bug("[Kernel:%02d] CPSR=%08x\n", cpunum, tmp);

    bug("[Kernel:%02d] TLS @ 0x%p\n", cpunum, (__tls));
    KernelBase = (struct KernelBase *)__tls->KernelBase;    // TLS_GET(KernelBase)
    SysBase = (struct ExecBase *)__tls->SysBase;            // TLS_GET(SysBase)
    bug("[Kernel:%02d] KernelBase @ 0x%p\n", cpunum, KernelBase);
    bug("[Kernel:%02d] SysBase @ 0x%p\n", cpunum, SysBase);

    if ((__tls->ThisTask = cpu_InitBootStrap(SysBase)) == NULL)
        goto cpu_registerfatal;

    if (__arm_arosintern.ARMI_InitCore)
        __arm_arosintern.ARMI_InitCore(KernelBase, SysBase);

    cpu_BootStrap(__tls->ThisTask, SysBase);
#endif
119 bug("[Kernel:%02d] Operational\n", cpunum);
121 cpu_registerfatal:
122 bug("[Kernel:%02d] Waiting for interrupts\n", cpunum);
124 KrnSpinUnLock(&startup_lock);
126 #if !defined(__AROSEXEC_SMP__)
127 do {
128 #endif
129 asm volatile("wfi");
130 #if !defined(__AROSEXEC_SMP__)
131 } while (1);
132 #else
134 /* switch to user mode, and load the bs task stack */
135 bug("[Kernel:%02d] Dropping into USER mode ... \n", cpunum);
137 uint32_t bs_stack = __tls->ThisTask->tc_SPUpper;
138 asm volatile(
139 "cps %[mode_user]\n"
140 "mov sp, %[bs_stack]\n"
141 : : [bs_stack] "r" (bs_stack), [mode_user] "I" (CPUMODE_USER)
144 /* We now start up the interrupts */
145 Permit();
146 Enable();
147 #endif
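/*
 * Hand-off sketch (inferred from this file and the commit message, not
 * code from it): the boot CPU holds startup_lock while a woken core runs
 * cpu_Register(); the core updates ARMI_AffinityMask in cpu_Init() under
 * that protection and then drops the lock via KrnSpinUnLock() once it is
 * operational. Assumed launcher side (wake_core() is hypothetical, and
 * the KrnSpinLock signature is an assumption based on the kernel
 * spinlock API):
 *
 *     KrnSpinLock(&startup_lock, NULL, SPINLOCK_MODE_WRITE);
 *     wake_core(n, (uint32_t)mpcore_trampoline);
 *     KrnSpinLock(&startup_lock, NULL, SPINLOCK_MODE_WRITE); // blocks until the core is up
 *     KrnSpinUnLock(&startup_lock);
 */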
/* Crude busy-wait: one nop per requested "microsecond", not calibrated
   against the core clock */
void cpu_Delay(int usecs)
{
    unsigned int delay;
    for (delay = 0; delay < usecs; delay++) asm volatile ("mov r0, r0\n");
}
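/*
 * Sketch (illustrative, not in this file): on platforms that provide the
 * ARMI_GetTime hook, a calibrated delay could poll the timer instead of
 * counting nops; cpu_DelayTimed() is a hypothetical replacement assuming
 * ARMI_GetTime() returns a monotonic microsecond count:
 *
 *     void cpu_DelayTimed(int usecs)
 *     {
 *         UQUAD end = __arm_arosintern.ARMI_GetTime() + usecs;
 *         while (__arm_arosintern.ARMI_GetTime() < end)
 *             asm volatile("nop");
 *     }
 */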
void cpu_Save_VFP16_State(void *buffer);
void cpu_Save_VFP32_State(void *buffer);
void cpu_Restore_VFP16_State(void *buffer);
void cpu_Restore_VFP32_State(void *buffer);
asm(
"cpu_Save_VFP16_State:                  \n"
"           vmrs r3, fpscr              \n" // read FPSCR into r3
"           str r3, [r0, #256]          \n" // stored after the 32 d-register slots
"           vstmia r0, {d0-d15}         \n"
"           bx lr                       \n"
"                                       \n"
"cpu_Save_VFP32_State:                  \n"
"           vmrs r3, fpscr              \n"
"           str r3, [r0, #256]          \n"
"           .word 0xec800b40            \n" // vstmia r0, {d0-d31}
"           bx lr                       \n"
"                                       \n"
"cpu_Restore_VFP16_State:               \n"
"           ldr r3, [r0, #256]          \n"
"           vmsr fpscr, r3              \n" // write the saved FPSCR back
"           vldmia r0, {d0-d15}         \n"
"           bx lr                       \n"
"                                       \n"
"cpu_Restore_VFP32_State:               \n"
"           ldr r3, [r0, #256]          \n"
"           vmsr fpscr, r3              \n"
"           .word 0xec900b20            \n" // vldmia r0, {d0-d31}
"           bx lr                       \n"
);
void cpu_Init_VFP_State(void *buffer)
{
    bzero(buffer, sizeof(struct VFPContext));
}
void cpu_Probe(struct ARM_Implementation *krnARMImpl)
{
    uint32_t tmp;

    asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (tmp));    // read MIDR
    if ((tmp & 0xfff0) == 0xc070)                               // primary part number 0xC07 (Cortex-A7)
    {
        krnARMImpl->ARMI_Family = 7;

        krnARMImpl->ARMI_Save_VFP_State = &cpu_Save_VFP16_State;
        krnARMImpl->ARMI_Restore_VFP_State = &cpu_Restore_VFP16_State;

#if defined(__AROSEXEC_SMP__)
        // Read the Multiprocessor Affinity Register (MPIDR)
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (tmp));

        if (tmp & (2 << 30))
        {
            // Multicore system
        }
#endif
    }
    else
    {
        krnARMImpl->ARMI_Family = 6;
        krnARMImpl->ARMI_Save_VFP_State = &cpu_Save_VFP16_State;
        krnARMImpl->ARMI_Restore_VFP_State = &cpu_Restore_VFP16_State;
    }

    krnARMImpl->ARMI_Init_VFP_State = &cpu_Init_VFP_State;
    krnARMImpl->ARMI_Delay = &cpu_Delay;
}
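/*
 * MPIDR decoding sketch (illustrative): bit 31 set means the
 * multiprocessing-format MPIDR is implemented (the bit tested above via
 * (2 << 30)); bit 30 (U) set means a uniprocessor; the low byte carries
 * affinity level 0, the core number within the cluster:
 *
 *     uint32_t mpidr;
 *     asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r"(mpidr));
 *     int mp_format = (mpidr >> 31) & 1;
 *     int uniproc   = (mpidr >> 30) & 1;
 *     int core      = mpidr & 0xff;   // Aff0
 */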
void cpu_Init(struct ARM_Implementation *krnARMImpl, struct TagItem *msg)
{
    register unsigned int fpuflags;
    cpuid_t cpunum = GetCPUNumber();

    core_SetupMMU(msg);

    /* Mark this core as online. startup_lock is still held at this point,
       so the mask update needs no extra locking */
    __arm_arosintern.ARMI_AffinityMask |= (1 << cpunum);

    /* Enable Vector Floating Point Calculations */
    asm volatile("mrc p15,0,%[fpuflags],c1,c0,2\n" : [fpuflags] "=r" (fpuflags));   // Read Access Control Register
    fpuflags |= (VFPSingle | VFPDouble);                                            // Enable Single & Double Precision
    asm volatile("mcr p15,0,%[fpuflags],c1,c0,2\n" : : [fpuflags] "r" (fpuflags));  // Set Access Control Register
    asm volatile(
        "   mov %[fpuflags],%[vfpenable]    \n" // Enable VFP
        "   fmxr fpexc,%[fpuflags]          \n"
        : [fpuflags] "=r" (fpuflags) : [vfpenable] "I" (VFPEnable));
}
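/*
 * Sketch (illustrative): FPEXC.EN (bit 30) can be read back to confirm
 * the VFP unit accepted the enable, assuming VFPEnable sets that bit:
 *
 *     uint32_t fpexc;
 *     asm volatile ("fmrx %0, fpexc" : "=r"(fpexc));
 *     if (!(fpexc & (1 << 30)))
 *         bug("[Kernel] VFP failed to enable\n");
 */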
#define ADDTIME(dest, src) \
    (dest)->tv_micro += (src)->tv_micro; \
    (dest)->tv_secs  += (src)->tv_secs; \
    while((dest)->tv_micro > 999999) \
    { \
        (dest)->tv_secs++; \
        (dest)->tv_micro -= 1000000; \
    }
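/*
 * Example: ADDTIME normalizes the microsecond field into seconds, so
 *
 *     struct timeval total = { 1, 999999 }, slice = { 0, 2 };
 *     ADDTIME(&total, &slice);    // total = { 2, 1 }, not { 1, 1000001 }
 */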
void cpu_Switch(regs_t *regs)
{
    struct Task *task;
    UQUAD timeCur;
    struct timeval timeVal;
#if defined(__AROSEXEC_SMP__) || defined(DEBUG)
    cpuid_t cpunum = GetCPUNumber();
#endif

    DSCHED(bug("[Kernel:%02d] cpu_Switch()\n", cpunum));

    task = GET_THIS_TASK;

    /* Cache running task's context */
    STORE_TASKSTATE(task, regs)

    if (__arm_arosintern.ARMI_GetTime)
    {
        /* Update the task's CPU time .. */
        timeCur = __arm_arosintern.ARMI_GetTime() - GetIntETask(task)->iet_private1;
        timeVal.tv_secs = timeCur / 1000000;
        timeVal.tv_micro = timeCur % 1000000;

        ADDTIME(&GetIntETask(task)->iet_CpuTime, &timeVal);
    }

    core_Switch();
}
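/*
 * Accounting note (sketch): iet_private1 holds the timestamp written by
 * cpu_Dispatch() when the task was launched, so the subtraction in
 * cpu_Switch() yields the just-ended runtime slice, assuming
 * ARMI_GetTime() returns a monotonic microsecond counter.
 */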
void cpu_Dispatch(regs_t *regs)
{
#if defined(__AROSEXEC_SMP__) || defined(DEBUG)
    cpuid_t cpunum = GetCPUNumber();
#endif
    struct Task *task;

    DSCHED(bug("[Kernel:%02d] cpu_Dispatch()\n", cpunum));

    /* Break Disable() if needed */
    if (IDNESTCOUNT_GET >= 0) {
        IDNESTCOUNT_SET(-1);
        ((uint32_t *)regs)[13] &= ~0x80;
    }

    while (!(task = core_Dispatch()))
    {
        DSCHED(bug("[Kernel:%02d] cpu_Dispatch: Nothing to run - idling\n", cpunum));
        asm volatile("wfi");
    }

    DSCHED(bug("[Kernel:%02d] cpu_Dispatch: 0x%p [R ] '%s'\n", cpunum, task, task->tc_Node.ln_Name));

    /* Restore the task's state */
    RESTORE_TASKSTATE(task, regs)

    DREGS(cpu_DumpRegs(regs));

    /* Handle task's flags */
    if (task->tc_Flags & TF_EXCEPT)
        Exception();

#if defined(__AROSEXEC_SMP__)
    GetIntETask(task)->iet_CpuNumber = cpunum;
#endif

    if (__arm_arosintern.ARMI_GetTime)
    {
        /* Store the launch time */
        GetIntETask(task)->iet_private1 = __arm_arosintern.ARMI_GetTime();
        if (!GetIntETask(task)->iet_StartTime.tv_secs && !GetIntETask(task)->iet_StartTime.tv_micro)
        {
            GetIntETask(task)->iet_StartTime.tv_secs = GetIntETask(task)->iet_private1 / 1000000;
            GetIntETask(task)->iet_StartTime.tv_micro = GetIntETask(task)->iet_private1 % 1000000;
        }
    }

    if (task->tc_Flags & TF_LAUNCH)
    {
        AROS_UFC1(void, task->tc_Launch,
                  AROS_UFCA(struct ExecBase *, SysBase, A6));
    }
    /* Leave interrupt and jump to the new task */
}
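/*
 * Flow sketch (illustrative): on a scheduling interrupt a core runs
 * roughly the following pair, after which the interrupt return restores
 * `regs` into the CPU:
 *
 *     cpu_Switch(regs);      // save outgoing context, bill its CPU time
 *     cpu_Dispatch(regs);    // pick the next task, restore its context
 */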
void cpu_DumpRegs(regs_t *regs)
{
    cpuid_t cpunum = GetCPUNumber();
    int i;

    bug("[Kernel:%02d] CPU Register Dump:\n", cpunum);
    for (i = 0; i < 12; i++)
    {
        bug("[Kernel:%02d]      r%02d: 0x%08x\n", cpunum, i, ((uint32_t *)regs)[i]);
    }
    bug("[Kernel:%02d] (ip) r12: 0x%08x\n", cpunum, ((uint32_t *)regs)[12]);
    bug("[Kernel:%02d] (sp) r13: 0x%08x\n", cpunum, ((uint32_t *)regs)[13]);
    bug("[Kernel:%02d] (lr) r14: 0x%08x\n", cpunum, ((uint32_t *)regs)[14]);
    bug("[Kernel:%02d] (pc) r15: 0x%08x\n", cpunum, ((uint32_t *)regs)[15]);
    bug("[Kernel:%02d]     cpsr: 0x%08x\n", cpunum, ((uint32_t *)regs)[16]);
}