arch/all-pc/kernel/smp.c

/*
    Copyright © 1995-2017, The AROS Development Team. All rights reserved.
    $Id$
*/

#define __KERNEL_NOLIBBASE__

#include <aros/types/spinlock_s.h>
#include <aros/atomic.h>
#include <asm/io.h>
#include <exec/execbase.h>
#include <exec/memory.h>
#include <proto/exec.h>
#include <proto/kernel.h>

#include "kernel_base.h"
#include "kernel_debug.h"
#include "kernel_globals.h"
#include "kernel_intern.h"
#include "kernel_syscall.h"
#include "kernel_ipi.h"
#include "smp.h"

#define D(x)
#define DWAKE(x)

extern const void *_binary_smpbootstrap_start;
extern const void *_binary_smpbootstrap_size;

extern APTR PlatformAllocGDT(struct KernelBase *, apicid_t);
extern APTR PlatformAllocTLS(struct KernelBase *, apicid_t);
extern APTR PlatformAllocIDT(struct KernelBase *, apicid_t);

#if defined(__AROSEXEC_SMP__)
extern void cpu_PrepareExec(struct ExecBase *);
extern struct Task *cpu_InitBootStrap(apicid_t);
extern void cpu_BootStrap(struct Task *);
#endif
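
/*
 * SMP bring-up overview:
 *  - smp_Initialize() prepares the IPI hook pools, then calls smp_Setup() and smp_Wake().
 *  - smp_Setup() copies the smpbootstrap trampoline into page-aligned low memory and
 *    records it as kb_APIC_TrampolineBase.
 *  - smp_Wake() wakes each secondary core via krnSysCallCPUWake(); the trampoline brings
 *    the core up and jumps to smp_Entry() with the arguments stored in the SMPBootstrap
 *    block (stack, ready lock, KernelBase, CPU number).
 */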

static void smp_Entry(IPTR stackBase, spinlock_t *apicReadyLock, struct KernelBase *KernelBase, apicid_t apicCPUNo)
{
    /*
     * This is the entry point for secondary cores.
     * KernelBase is already set up by the primary CPU, so we can use it.
     */
    struct APICData *apicData = KernelBase->kb_PlatformData->kb_APIC;
    __unused struct CPUData *apicCPU;
    IPTR _APICBase;
    apicid_t _APICID;
#if defined(__AROSEXEC_SMP__)
    struct Task *apicBSTask;
#endif

#if (__WORDSIZE==64)
    /* Enable fxsave/fxrstor */
    wrcr(cr4, rdcr(cr4) | _CR4_OSFXSR | _CR4_OSXMMEXCPT);
#endif

    apicCPU = &apicData->cores[apicCPUNo];

    /* Find out ourselves */
    _APICBase = core_APIC_GetBase();
    _APICID = core_APIC_GetID(_APICBase);

    bug("[Kernel:SMP] %s[%03u]: APIC ID %03u starting up...\n", __func__, apicCPUNo, _APICID);
    if (apicCPU->cpu_LocalID != _APICID)
    {
        bug("[Kernel:SMP] %s[%03u]: Warning! expected ID %03u\n", __func__, apicCPUNo, apicCPU->cpu_LocalID);
    }

    bug("[Kernel:SMP] %s[%03u]: APIC base @ 0x%p\n", __func__, apicCPUNo, _APICBase);
#if (__WORDSIZE==64)
    bug("[Kernel:SMP] %s[%03u]: KernelBootPrivate 0x%p\n", __func__, apicCPUNo, __KernBootPrivate);
#endif
    bug("[Kernel:SMP] %s[%03u]: StackBase 0x%p\n", __func__, apicCPUNo, stackBase);
    bug("[Kernel:SMP] %s[%03u]: Ready Lock 0x%p\n", __func__, apicCPUNo, apicReadyLock);

#if (0)
    apicCPUNo = core_APIC_GetNumber(apicData);
#endif

    D(bug("[Kernel:SMP] %s[%03u]: APIC CPU Data @ 0x%p\n", __func__, apicCPUNo, apicCPU));

    /* Set up GDT and LDT for our core */
    D(bug("[Kernel:SMP] %s[%03u]: GDT @ 0x%p, TLS @ 0x%p\n", __func__, apicCPUNo, apicCPU->cpu_GDT, apicCPU->cpu_TLS));
#if (__WORDSIZE==64)
    core_SetupGDT(__KernBootPrivate, apicCPUNo, apicCPU->cpu_GDT, apicCPU->cpu_TLS, __KernBootPrivate->TSS);

    core_CPUSetup(apicCPUNo, apicCPU->cpu_GDT, stackBase);
#endif

    D(bug("[Kernel:SMP] %s[%03u]: Core IDT @ 0x%p\n", __func__, apicCPUNo, apicCPU->cpu_IDT));
#if (__WORDSIZE==64)
    core_SetupIDT(apicCPUNo, apicCPU->cpu_IDT);

    if (!core_SetIDTGate((struct int_gate_64bit *)apicCPU->cpu_IDT, APIC_IRQ_SYSCALL, (uintptr_t)IntrDefaultGates[APIC_IRQ_SYSCALL], TRUE))
    {
        krnPanic(NULL, "Failed to set APIC Syscall Vector\n"
                       "Vector #$%02X\n", APIC_IRQ_SYSCALL);
    }
    D(bug("[Kernel:SMP] %s[%03u]: APIC Syscall Vector configured\n", __func__, apicCPUNo));

    if (!core_SetIDTGate((struct int_gate_64bit *)apicCPU->cpu_IDT, 0xff, (uintptr_t)IntrDefaultGates[0xff], TRUE))
    {
        krnPanic(NULL, "Failed to set APIC Spurious Vector\n"
                       "Vector #$%02X\n", 0xff);
    }
    D(bug("[Kernel:SMP] %s[%03u]: APIC Spurious Vector configured\n", __func__, apicCPUNo));

    D(bug("[Kernel:SMP] %s[%03u]: Preparing MMU...\n", __func__, apicCPUNo));
    core_LoadMMU(&__KernBootPrivate->MMU);
#endif
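
    /*
     * For now every core loads the boot CPU's MMU tables; see the TODO in smp_Setup()
     * about giving each core its own copies.
     */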

#if defined(__AROSEXEC_SMP__)
    D(bug("[Kernel:SMP] %s[%03u]: SysBase @ 0x%p\n", __func__, apicCPUNo, SysBase));

    TLS_SET(SysBase, SysBase);
    TLS_SET(KernelBase, KernelBase);

    if ((apicBSTask = cpu_InitBootStrap(apicCPUNo)) != NULL)
    {
        apicBSTask->tc_SPLower = NULL;
        apicBSTask->tc_SPUpper = (APTR)~0;

        cpu_BootStrap(apicBSTask);
    }
#else
    bug("[Kernel:SMP] APIC #%u of %u Going IDLE (Halting)...\n", apicCPUNo + 1, apicData->apic_count);
#endif

    /* Signal the bootstrap core that we are running */
    KrnSpinUnLock((spinlock_t *)apicReadyLock);
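
    /*
     * From this point on the boot CPU may reuse the shared SMPBootstrap argument
     * area for the next core (see the wait loop in smp_Wake()).
     */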

#if defined(__AROSEXEC_SMP__)
    if (apicBSTask)
    {
        D(bug("[Kernel:SMP] %s[%03u]: Starting up Scheduler...\n", __func__, apicCPUNo);)

        /* Clean up now that we are done */
        RemTask(apicBSTask);
    }

    bug("[Kernel:SMP] APIC #%u Failed to bootstrap (Halting)...\n", apicCPUNo + 1);
    while (1) asm volatile("cli; hlt");
#else
    while (1) asm volatile("hlt");
#endif
}

static int smp_Setup(struct KernelBase *KernelBase)
{
    struct PlatformData *pdata = KernelBase->kb_PlatformData;
    unsigned long bslen = (unsigned long)&_binary_smpbootstrap_size;
    struct MemHeader *lowmem;
    APTR smpboot = NULL;
    struct SMPBootstrap *bs;

    D(bug("[Kernel:SMP] %s()\n", __func__));

    /* Find a suitable memheader to allocate the bootstrap from .. */
    ForeachNode(&SysBase->MemList, lowmem)
    {
        /* Is it in lowmem? */
        if ((IPTR)lowmem->mh_Lower < 0x000100000)
        {
            D(bug("[Kernel:SMP] Trying memheader @ 0x%p\n", lowmem));
            D(bug("[Kernel:SMP] * 0x%p - 0x%p (%s pri %d)\n", lowmem->mh_Lower, lowmem->mh_Upper, lowmem->mh_Node.ln_Name, lowmem->mh_Node.ln_Pri));

            /*
             * Attempt to allocate space for the SMP bootstrap code.
             * NB: Its address must be page-aligned!
             * NB2: Every CPU starts up in real mode.
             */
            smpboot = Allocate(lowmem, bslen + PAGE_SIZE - 1);
            if (smpboot)
                break;
        }
    }

    if (!smpboot)
    {
        bug("[Kernel:SMP] Failed to allocate %d bytes for SMP bootstrap\n", bslen + PAGE_SIZE - 1);
        return 0;
    }
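
    /*
     * The trampoline must sit page-aligned below 1MB: the secondary cores begin
     * executing in real mode, and the startup IPI delivers the entry point as a
     * 4KB page frame number within the first megabyte.
     */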

    /* Install SMP bootstrap code */
    bs = (APTR)AROS_ROUNDUP2((IPTR)smpboot, PAGE_SIZE);
    CopyMem(&_binary_smpbootstrap_start, bs, (unsigned long)&_binary_smpbootstrap_size);
    pdata->kb_APIC_TrampolineBase = bs;

    D(bug("[Kernel:SMP] Copied APIC bootstrap code to 0x%p\n", bs));

    /*
     * Store constant arguments in the bootstrap's data area.
     * WARNING!!! The bootstrap code assumes the PML4 is placed in 32-bit memory,
     * and there seems to be no easy way to fix this.
     * If the AROS kickstart is ever loaded into high memory, we would need to
     * take special care of it.
     */
    bs->Arg3 = (IPTR)KernelBase;
#if (__WORDSIZE==64)
    // TODO: Allocate the core's own MMU structures and copy the necessary data to them
    bs->PML4 = __KernBootPrivate->MMU.mmu_PML4;
#endif
    bs->IP = smp_Entry;

    return 1;
}

/*
 * Here we wake up our secondary cores.
 */
static int smp_Wake(struct KernelBase *KernelBase)
{
    struct PlatformData *pdata = KernelBase->kb_PlatformData;
    struct SMPBootstrap *bs = pdata->kb_APIC_TrampolineBase;
    struct APICData *apicData = pdata->kb_APIC;
    APTR _APICStackBase;
    IPTR wakeresult = -1;
    apicid_t cpuNo;
#if defined(__AROSEXEC_SMP__)
    tls_t *apicTLS;
#endif
    spinlock_t *apicReadyLocks;

    apicReadyLocks = AllocMem(sizeof(spinlock_t) * apicData->apic_count, MEMF_CLEAR|MEMF_ANY);
    D(bug("[Kernel:SMP] %d Ready spinlocks starting at 0x%p\n", apicData->apic_count, apicReadyLocks));

    /* Core number 0 is our bootstrap core, so we start from No 1 */
    for (cpuNo = 1; cpuNo < apicData->apic_count; cpuNo++)
    {
        struct APICCPUWake_Data apicWake =
        {
            bs,
            apicData->lapicBase,
            apicData->cores[cpuNo].cpu_LocalID
        };

        D(bug("[Kernel:SMP] Launching CPU #%u (ID %03u)\n", cpuNo + 1, apicData->cores[cpuNo].cpu_LocalID));

        KrnSpinInit(&apicReadyLocks[cpuNo]);

        apicData->cores[cpuNo].cpu_GDT = PlatformAllocGDT(KernelBase, apicData->cores[cpuNo].cpu_LocalID);
        apicData->cores[cpuNo].cpu_TLS = PlatformAllocTLS(KernelBase, apicData->cores[cpuNo].cpu_LocalID);
#if defined(__AROSEXEC_SMP__)
        apicTLS = apicData->cores[cpuNo].cpu_TLS;
        apicTLS->ScheduleData = AllocMem(sizeof(struct X86SchedulerPrivate), MEMF_PUBLIC);
        core_InitScheduleData(apicTLS->ScheduleData);
        D(bug("[Kernel:SMP] Scheduling Data @ 0x%p\n", apicTLS->ScheduleData));
#endif
        apicData->cores[cpuNo].cpu_IDT = PlatformAllocIDT(KernelBase, apicData->cores[cpuNo].cpu_LocalID);

        /*
         * First we need to allocate a stack for our CPU.
         * We allocate the same three stacks as in core_CPUSetup().
         */
        _APICStackBase = AllocMem(STACK_SIZE * 3, MEMF_CLEAR);
        D(bug("[Kernel:SMP] Allocated STACK for APIC ID %03u @ 0x%p ..\n", apicData->cores[cpuNo].cpu_LocalID, _APICStackBase));
        if (!_APICStackBase)
            return 0;

        /* Pass some vital information to the waking CPU */
        bs->Arg1 = (IPTR)_APICStackBase;
        bs->Arg2 = (IPTR)&apicReadyLocks[cpuNo];
        // Arg3 = KernelBase - already set by smp_Setup()
        bs->Arg4 = (IPTR)cpuNo;
        bs->SP = _APICStackBase + STACK_SIZE;

        /* Lock the spinlock before launching the core */
        KrnSpinLock(&apicReadyLocks[cpuNo], NULL, SPINLOCK_MODE_WRITE);
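
        /*
         * Handshake: the lock is held in write mode while the new core starts up.
         * smp_Entry() releases it once the core runs on its own stack, and the
         * read try-lock loop below waits for that (with a TSC-based timeout).
         */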

        /* Start IPI sequence */
        wakeresult = krnSysCallCPUWake(&apicWake);

        /* wakeresult != 0 means error */
        if (!wakeresult)
        {
            UQUAD current, start = RDTSC();

            /*
             * Before we proceed we need to make sure that the core has picked up
             * its stack and we can reload bootstrap argument area with another one.
             */
            DWAKE(bug("[Kernel:SMP] Waiting for CPU #%u to initialise .. ", cpuNo + 1));
            while (!KrnSpinTryLock(&apicReadyLocks[cpuNo], SPINLOCK_MODE_READ))
            {
                asm volatile("pause");
                current = RDTSC();
                if (((current - start)/apicData->cores[0].cpu_TSCFreq) >
#if (DEBUG > 0)
                    50000
#else
#endif
                    )
                {
                    wakeresult = -1;
                    break;
                }
            }
            if (wakeresult != -1)
            {
                KrnSpinUnLock(&apicReadyLocks[cpuNo]);
                D(bug("[Kernel:SMP] CPU #%u started up\n", cpuNo + 1));
            }
        }
        D(if (wakeresult) { bug("[Kernel:SMP] core_APIC_Wake() failed, status 0x%p\n", wakeresult); })
    }

    D(bug("[Kernel:SMP] Done\n"));

    return 1;
}

int smp_Initialize(void)
{
    struct KernelBase *KernelBase = getKernelBase();
    struct PlatformData *pdata = KernelBase->kb_PlatformData;

    if (pdata->kb_APIC && (pdata->kb_APIC->apic_count > 1))
    {
        int number_of_ipi_messages = 0;
        struct IPIHook *hooks;
        int i;

#if defined(__AROSEXEC_SMP__)
        cpu_PrepareExec(SysBase);
#endif
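
        /*
         * Pre-allocate a pool of IPI CALL_HOOK message slots (10 per core) and keep
         * them on a spinlock-protected free list for the IPI code to draw from.
         */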

        D(bug("[Kernel:SMP] %s: Initializing Lists for IPI messages ...\n", __func__));
        NEWLIST(&pdata->kb_FreeIPIHooks);
        NEWLIST(&pdata->kb_BusyIPIHooks);
        KrnSpinInit(&pdata->kb_FreeIPIHooksLock);
        KrnSpinInit(&pdata->kb_BusyIPIHooksLock);

        number_of_ipi_messages = pdata->kb_APIC->apic_count * 10;
        D(bug("[Kernel:SMP] %s: Allocating %d IPI CALL_HOOK messages ...\n", __func__, number_of_ipi_messages));
        hooks = AllocMem(sizeof(struct IPIHook) * number_of_ipi_messages, MEMF_PUBLIC | MEMF_CLEAR);
        if (hooks)
        {
            for (i = 0; i < number_of_ipi_messages; i++)
            {
                hooks[i].ih_CPUDone = KrnAllocCPUMask();
                hooks[i].ih_CPURequested = KrnAllocCPUMask();
                KrnSpinInit(&hooks[i].ih_Lock);

                ADDHEAD(&pdata->kb_FreeIPIHooks, &hooks[i]);
            }
        }
        else
        {
            bug("[Kernel:SMP] %s: Failed to get IPI slots!\n", __func__);
        }

        if (!smp_Setup(KernelBase))
        {
            D(bug("[Kernel:SMP] Failed to prepare the environment!\n"));

            pdata->kb_APIC->apic_count = 1; /* We have only one working CPU */
            return 0;
        }

        return smp_Wake(KernelBase);
    }

    /* This is not an SMP machine, but it's okay */
    return 1;
}