AROS.git: arch/all-pc/kernel/kernel_ipi.c

/*
    Copyright © 1995-2018, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <asm/cpu.h>
#include <strings.h>

#define __KERNEL_NOLIBBASE__
#include <proto/kernel.h>

#include "kernel_base.h"
#include "kernel_intern.h"
#include "kernel_debug.h"

#include "kernel_ipi.h"

#include <kernel_scheduler.h>
#include <kernel_intr.h>

#include "apic_ia32.h"

#define D(x)
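
/*
 * core_DoIPI:
 * Raise the given IPI on the CPUs selected by cpu_mask by programming the
 * local APIC Interrupt Command Register (ICR).
 */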

void core_DoIPI(uint8_t ipi_number, void *cpu_mask, struct KernelBase *KernelBase)
{
    int cpunum = KrnGetCPUNumber();
    ULONG cmd = APIC_CPU_EXCEPT_TO_VECTOR(APIC_EXCEPT_IPI_NOP + ipi_number) | ICR_INT_ASSERT;
    struct PlatformData *kernPlatD = (struct PlatformData *)KernelBase->kb_PlatformData;
    struct APICData *apicPrivate = kernPlatD->kb_APIC;
    IPTR __APICBase = apicPrivate->lapicBase;
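
    /*
     * cmd already carries the vector derived from the IPI number plus
     * ICR_INT_ASSERT; only the destination (shorthand bits or an explicit
     * APIC ID) still needs to be filled in before the ICR is written.
     */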

    D(bug("[Kernel:IPI] Sending IPI %02d from CPU.%03u to target mask @ 0x%p\n", ipi_number, cpunum, cpu_mask));
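
    /* Make sure all prior stores are globally visible before any target CPU handles the IPI. */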
    asm volatile("sfence");
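
    /*
     * ICR destination shorthand (bits 18-19): 01b = self, 10b = all including
     * self, 11b = all excluding self - encoded below as 0x40000, 0x80000 and
     * 0xC0000. ICR_DS is the delivery-status bit and must read 0 before a new
     * command may be written. For an explicit target the destination APIC ID
     * goes into bits 24-31 of the high ICR word (APIC_ICRH).
     */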
    if ((cmd & 0xff) <= APIC_CPU_EXCEPT_TO_VECTOR(APIC_EXCEPT_IPI_CAUSE))
    {
        // special case - send IPI to all
        if ((IPTR)cpu_mask == TASKAFFINITY_ANY)
        {
            // Shorthand - all including self
            cmd |= 0x80000;

            D(bug("[Kernel:IPI] waiting for DS bit to be clear\n"));
            while (APIC_REG(__APICBase, APIC_ICRL) & ICR_DS) asm volatile("pause");
            D(bug("[Kernel:IPI] sending IPI cmd %08x\n", cmd));
            APIC_REG(__APICBase, APIC_ICRL) = cmd;
        }
        else if ((IPTR)cpu_mask == TASKAFFINITY_ALL_BUT_SELF)
        {
            // Shorthand - all excluding self
            cmd |= 0xc0000;

            D(bug("[Kernel:IPI] waiting for DS bit to be clear\n"));
            while (APIC_REG(__APICBase, APIC_ICRL) & ICR_DS) asm volatile("pause");
            D(bug("[Kernel:IPI] sending IPI cmd %08x\n", cmd));
            APIC_REG(__APICBase, APIC_ICRL) = cmd;
        }
        else
        {
            int i;

            // No shortcut, send IPI to each CPU one after another
            for (i=0; i < apicPrivate->apic_count; i++)
            {
                if (KrnCPUInMask(i, cpu_mask))
                {
                    D(bug("[Kernel:IPI] waiting for DS bit to be clear\n"));
                    while (APIC_REG(__APICBase, APIC_ICRL) & ICR_DS) asm volatile("pause");
                    if (cpunum == i)
                    {
                        D(bug("[Kernel:IPI] sending IPI cmd %08x to destination %08x (self-ipi)\n", cmd, (apicPrivate->cores[i].cpu_LocalID << 24)));
                        APIC_REG(__APICBase, APIC_ICRL) = cmd | 0x40000;
                    }
                    else
                    {
                        D(bug("[Kernel:IPI] sending IPI cmd %08x to destination %08x\n", cmd, (apicPrivate->cores[i].cpu_LocalID << 24)));
                        APIC_REG(__APICBase, APIC_ICRH) = (apicPrivate->cores[i].cpu_LocalID << 24);
                        APIC_REG(__APICBase, APIC_ICRL) = cmd;
                    }
                }
            }
        }
    }
}
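
/*
 * core_DoCallIPI:
 * Run the given hook on the CPUs selected by cpu_mask via the IPI_CALL_HOOK
 * interrupt. For a synchronous call (async == 0) the caller spins until every
 * requested CPU has executed the hook.
 */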

int core_DoCallIPI(struct Hook *hook, void *cpu_mask, int async, int nargs, IPTR *args, APTR _KB)
{
    struct KernelBase *KernelBase = _KB;
    struct PlatformData *pdata = KernelBase->kb_PlatformData;
    struct IPIHook *ipi = NULL;
    struct APICData *apicPrivate = pdata->kb_APIC;
    int cpunum = KrnGetCPUNumber();
    int ret = FALSE;
    int i;

    D(bug("[Kernel:IPI] %s: Calling hook %p, async=%d\n", __func__, hook, async));

    if (nargs > IPI_CALL_HOOK_MAX_ARGS)
        return ret;

    if (hook)
    {
        /*
            Allocate IPIHook just by removing it from the Free list:
            First Disable() so that we are not interrupted on this CPU core, then use SpinLock to protect us from
            other cores accessing the list.

            If the FreeIPIHooks list is empty, just do busyloop wait - other cores shall free the hook sooner or later
        */
        do
        {
            Disable();
            KrnSpinLock(&pdata->kb_FreeIPIHooksLock, NULL, SPINLOCK_MODE_WRITE);
            ipi = (struct IPIHook *)REMHEAD(&pdata->kb_FreeIPIHooks);
            KrnSpinUnLock(&pdata->kb_FreeIPIHooksLock);
            Enable();

            if (ipi == NULL)
            {
                D(bug("[Kernel:IPI] %s: Failed to allocate IPIHook entry\n", __func__));
                // Tell CPU we are idling around a lock...
                asm volatile("pause");
            }
        } while (ipi == NULL);

        D(bug("[Kernel:IPI] %s: Allocated IPIHook %p\n", __func__, ipi));

        /*
            Copy IPI data from struct Hook provided by caller into allocated ipi
        */
        ipi->ih_Hook.h_Entry = hook->h_Entry;
        ipi->ih_Hook.h_SubEntry = hook->h_SubEntry;
        ipi->ih_Hook.h_Data = hook->h_Data;

        /*
            Copy call hook arguments
        */
        for (i=0; i < nargs; i++)
            ipi->ih_Args[i] = args[i];
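
        /*
         * For a synchronous call ih_SyncLock is write-locked here and only
         * released by the last CPU to finish in core_IPICallHookHandle(),
         * which is what the caller blocks on further below.
         */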
        if (async)
        {
            ipi->ih_Async = 1;
        }
        else
        {
            ipi->ih_Async = 0;

            KrnSpinInit(&ipi->ih_SyncLock);
            KrnSpinLock(&ipi->ih_SyncLock, NULL, SPINLOCK_MODE_WRITE);
        }
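
        /*
         * The request/done masks hold one bit per CPU, packed into ULONGs;
         * (31 + apic_count) / 32 rounds the size up to whole longwords.
         */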
        /* Clear CPU done mask */
        bzero(ipi->ih_CPUDone, sizeof(ULONG)*((31 + apicPrivate->apic_count) / 32));

        /* Copy CPU mask */
        if (cpu_mask && cpu_mask != (void*)TASKAFFINITY_ANY && cpu_mask != (void*)TASKAFFINITY_ALL_BUT_SELF)
            bcopy(cpu_mask, ipi->ih_CPURequested, sizeof(ULONG)*((31 + apicPrivate->apic_count) / 32));
        else
        {
            int i;

            bzero(ipi->ih_CPURequested, sizeof(ULONG)*((31 + apicPrivate->apic_count) / 32));

            for (i=0; i < apicPrivate->apic_count; i++)
            {
                if ((cpu_mask == (APTR)TASKAFFINITY_ALL_BUT_SELF) && (i == cpunum))
                    continue;

                bit_test_and_set_long(ipi->ih_CPURequested, i);
            }
        }

        /*
            Put the IPIHook on the BusyIPIHooks list, so that it gets processed once IPIs are called
        */
        Disable();
        KrnSpinLock(&pdata->kb_BusyIPIHooksLock, NULL, SPINLOCK_MODE_WRITE);
        ADDTAIL(&pdata->kb_BusyIPIHooks, ipi);
        KrnSpinUnLock(&pdata->kb_BusyIPIHooksLock);
        Enable();

        D(bug("[Kernel:IPI] %s: Sending IPI message for IPIHook %p\n", __func__, ipi));

        ret = TRUE;

        /* Issue IPI_CALL_HOOK to requested CPUs */
        core_DoIPI(IPI_CALL_HOOK, cpu_mask, KernelBase);

        /* If synchronous IPI, wait for completion */
        if (!async)
        {
            D(bug("[Kernel:IPI] %s: Synchronous IPI, waiting for completion\n", __func__));
            KrnSpinLock(&ipi->ih_SyncLock, NULL, SPINLOCK_MODE_WRITE);
            KrnSpinUnLock(&ipi->ih_SyncLock);
            D(bug("[Kernel:IPI] %s: Synchronous IPI completed\n", __func__));
        }
    }

    return ret;
}
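
/*
 * core_IPICallHookHandle:
 * Runs on every CPU that receives IPI_CALL_HOOK. It walks the BusyIPIHooks
 * list, executes each hook that requested this CPU and, once all requested
 * CPUs have processed a hook, moves its IPIHook back onto the free list.
 */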

static void core_IPICallHookHandle(struct ExceptionContext *regs, struct KernelBase *KernelBase)
{
    struct PlatformData *pdata = KernelBase->kb_PlatformData;
    struct IPIHook *ipi = NULL;
    struct IPIHook *n = NULL;
    struct APICData *apicPrivate = pdata->kb_APIC;
    int cpunum = KrnGetCPUNumber();

    D(bug("[Kernel:IPI.CPU.%03u] %s\n", cpunum, __func__));

    KrnSpinLock(&pdata->kb_BusyIPIHooksLock, NULL, SPINLOCK_MODE_WRITE);

    ForeachNodeSafe(&pdata->kb_BusyIPIHooks, ipi, n)
    {
        D(bug("[Kernel:IPI.CPU.%03u] %s: Checking node %p\n", cpunum, __func__, ipi));
        if (KrnCPUInMask(cpunum, ipi->ih_CPURequested))
        {
            /*
                In case of asynchronous hook, the object we pass is IPIHook itself. Dangerous, but we needed to copy the
                original Hook since it could have existed on stack of caller...
            */
            if (ipi->ih_Async)
            {
                D(bug("[Kernel:IPI.CPU.%03u] %s: Calling HOOK Entry %p with Data %p\n", cpunum, __func__,
                    ipi->ih_Hook.h_Entry, &ipi->ih_Hook));

                CALLHOOKPKT(&ipi->ih_Hook, NULL, 0);
            }
            else
            {
                D(bug("[Kernel:IPI.CPU.%03u] %s: Calling HOOK Entry %p with Data %p\n", cpunum, __func__,
                    ipi->ih_Hook.h_Entry, ipi->ih_Hook.h_Data));

                CALLHOOKPKT(&ipi->ih_Hook, NULL, 0);
            }

            /* This part operates on locked IPIHook */
            KrnSpinLock(&ipi->ih_Lock, NULL, SPINLOCK_MODE_WRITE);

            /* Mark this CPU as done */
            bit_test_and_set_long(ipi->ih_CPUDone, cpunum);

            /* Check if all requested CPUs have handled the IPI */
            if (!memcmp(ipi->ih_CPUDone, ipi->ih_CPURequested, sizeof(ULONG) * ((31 + apicPrivate->apic_count)/32)))
            {
                D(bug("[Kernel:IPI.CPU.%03u] %s: IPIHook completed. Releasing it\n", cpunum, __func__));
                int async = ipi->ih_Async;

                /* Yes, remove this ipi from BusyList */
                REMOVE(ipi);

                /* Put it on FreeList */
                KrnSpinLock(&pdata->kb_FreeIPIHooksLock, NULL, SPINLOCK_MODE_WRITE);
                ADDTAIL(&pdata->kb_FreeIPIHooks, ipi);
                KrnSpinUnLock(&pdata->kb_FreeIPIHooksLock);

                if (!async)
                {
                    D(bug("[Kernel:IPI.CPU.%03u] %s: Releasing sync lock\n", cpunum, __func__));
                    KrnSpinUnLock(&ipi->ih_SyncLock);
                }
            }

            /* End of IPIHook critical section */
            KrnSpinUnLock(&ipi->ih_Lock);
        }
    }

    KrnSpinUnLock(&pdata->kb_BusyIPIHooksLock);
}
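
/*
 * core_IPIHandle:
 * Low-level handler invoked for the IPI vectors; data1 carries the IPI number
 * that was registered for the vector.
 */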

int core_IPIHandle(struct ExceptionContext *regs, void *data1, struct KernelBase *KernelBase)
{
    D(int cpunum = KrnGetCPUNumber();)
    IPTR ipi_number = (IPTR)data1;
    IPTR __APICBase = core_APIC_GetBase();

    D(bug("[Kernel:IPI] CPU.%03u IPI%02d\n", cpunum, ipi_number));
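
    /* Each case below acknowledges the interrupt by writing 0 to the local APIC EOI register. */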
    switch (ipi_number)
    {
        case IPI_RESCHEDULE:
            APIC_REG(__APICBase, APIC_EOI) = 0;
            // If IPI was called when CPU was in user mode, and not in forbid state,
            // perform task switch - otherwise set delayed schedule flag.
            if (INTR_FROMUSERMODE && (TDNESTCOUNT_GET < 0))
            {
                if (core_Schedule())
                {
                    cpu_Switch(regs);
                    cpu_Dispatch(regs);
                }
            }
            else
            {
                FLAG_SCHEDSWITCH_SET;
            }
            break;

        case IPI_CALL_HOOK:
            core_IPICallHookHandle(regs, KernelBase);
            APIC_REG(__APICBase, APIC_EOI) = 0;
            break;
    }

    return TRUE;
}