/*
** kernel/smp.c (newos.git) -- generic SMP support, moved from i386-specific code.
** Origin blob: 0ba31ae9212989bd3a80585da8669ac10c52b139
*/
1 /*
2 ** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
3 ** Distributed under the terms of the NewOS License.
4 */
5 #include <kernel/kernel.h>
6 #include <kernel/thread.h>
7 #include <kernel/console.h>
8 #include <kernel/debug.h>
9 #include <kernel/int.h>
10 #include <kernel/smp_priv.h>
11 #include <kernel/smp.h>
12 #include <kernel/heap.h>
13 #include <newos/errors.h>
14 #include <kernel/cpu.h>
16 #include <kernel/arch/smp.h>
18 #include <string.h>
20 // without smp code, there's no point compiling this stuff
21 #if _WITH_SMP
22 #define MSG_POOL_SIZE (_MAX_CPUS * 4)
// One inter-cpu message. Messages live either in a per-cpu mailbox
// (smp_msgs[cpu]) or in the shared broadcast mailbox (smp_broadcast_msgs),
// linked through 'next'. Storage comes from the free_msgs pool.
struct smp_msg {
	struct smp_msg *next;		// mailbox / free-list link
	int message;			// SMP_MSG_* code dispatched in smp_process_pending_ici()
	unsigned long data;		// message-specific arguments
	unsigned long data2;
	unsigned long data3;
	void *data_ptr;			// optional payload; kfree'd by the last cpu to process the msg
	int flags;			// SMP_MSG_FLAG_SYNC or async
	int ref_count;			// cpus that still have to process this message
	volatile bool done;		// set by the last processor so a SYNC sender can stop spinning
	unsigned int proc_bitmap;	// broadcast only: cpus that have already handled the msg
	int lock;			// NOTE(review): never touched in this file -- presumably vestigial
};
// Identifies which mailbox a message was taken from, so
// smp_finish_message_processing() knows which list/lock to use.
enum {
	MAILBOX_LOCAL = 1,
	MAILBOX_BCAST
};
// Per-cpu parking spots: AP cpus spin on their own slot in
// smp_trap_non_boot_cpus() until the boot cpu releases them.
static spinlock_t boot_cpu_spin[_MAX_CPUS] = { 0, };

// Pool of free smp_msg structures; guarded by free_msg_spinlock.
// free_msg_count is volatile so the lock-free peek in find_free_message()
// re-reads it each iteration.
static struct smp_msg *free_msgs = NULL;
static volatile int free_msg_count = 0;
static spinlock_t free_msg_spinlock = 0;

// Per-cpu message mailboxes, each with its own lock.
static struct smp_msg *smp_msgs[_MAX_CPUS] = { NULL, };
static spinlock_t cpu_msg_spinlock[_MAX_CPUS] = { 0, };

// Single shared mailbox for broadcast messages.
static struct smp_msg *smp_broadcast_msgs = NULL;
static spinlock_t broadcast_msg_spinlock = 0;

// True once inter-cpu interrupts may actually be sent (see smp_enable_ici()).
static bool ici_enabled = false;

// Number of active cpus; 1 until smp_set_num_cpus()/smp_init() runs.
static int smp_num_cpus = 1;

static int smp_process_pending_ici(int curr_cpu);
// Acquire a spinlock. While waiting, keep servicing pending inter-cpu
// messages -- this prevents a deadlock where the lock holder is spinning
// for us to acknowledge a SYNC ICI. Must be called with interrupts
// disabled (panics otherwise). No-op gate: skips everything on a single
// cpu, where the interrupt-disable done by the caller is sufficient.
void acquire_spinlock(spinlock_t *lock)
{
	if(smp_num_cpus > 1) {
		int curr_cpu = smp_get_current_cpu();
		if(int_are_interrupts_enabled())
			panic("acquire_spinlock: attempt to acquire lock %p with interrupts enabled\n", lock);
		while(1) {
			// spin until the lock looks free, processing ICIs meanwhile
			while(*lock != 0)
				smp_process_pending_ici(curr_cpu);
			// then try to take it atomically; retry on a lost race
			if(atomic_set(lock, 1) == 0)
				break;
		}
	}
}
// Like acquire_spinlock(), but performs no interrupt-state sanity check
// and does NOT service pending ICIs while spinning. Used for the message
// mailbox locks themselves, where recursing into ICI processing would
// re-enter this code.
static void acquire_spinlock_nocheck(spinlock_t *lock)
{
	if(smp_num_cpus > 1) {
		while(1) {
			while(*lock != 0)
				;
			if(atomic_set(lock, 1) == 0)
				break;
		}
	}
}
// Release a spinlock with a plain store.
// NOTE(review): no explicit memory barrier here -- presumably relies on
// the target architecture's store ordering (x86); confirm before porting.
void release_spinlock(spinlock_t *lock)
{
	*lock = 0;
}
// finds a free message and gets it
// NOTE: has side effect of disabling interrupts
// Spins (lock-free) until the pool looks non-empty, then re-checks under
// the pool lock; the unlocked peek can race with another cpu draining the
// pool, hence the retry loop.
static void find_free_message(struct smp_msg **msg)
{
//	dprintf("find_free_message: entry\n");

retry:
	// cheap unlocked wait; free_msg_count is volatile so this re-reads
	while(free_msg_count <= 0)
		;
	int_disable_interrupts();
	acquire_spinlock(&free_msg_spinlock);

	if(free_msg_count <= 0) {
		// someone grabbed one while we were getting the lock,
		// go back to waiting for it
		release_spinlock(&free_msg_spinlock);
		int_restore_interrupts();
		goto retry;
	}

	// pop the head of the free list
	*msg = free_msgs;
	free_msgs = (*msg)->next;
	free_msg_count--;

	release_spinlock(&free_msg_spinlock);

//	dprintf("find_free_message: returning msg 0x%x\n", *msg);
}
// Push a message back onto the free pool. Caller must already have
// interrupts disabled (uses the nocheck lock variant).
static void return_free_message(struct smp_msg *msg)
{
//	dprintf("return_free_message: returning msg 0x%x\n", msg);
	acquire_spinlock_nocheck(&free_msg_spinlock);
	msg->next = free_msgs;
	free_msgs = msg;
	free_msg_count++;
	release_spinlock(&free_msg_spinlock);
}
// Find the next message this cpu should process: the local mailbox first,
// then the broadcast mailbox. Local messages are unlinked here; broadcast
// messages stay on the list (other cpus still need them) and are merely
// marked in proc_bitmap so this cpu won't pick them up twice.
// *source_mailbox is set to MAILBOX_LOCAL or MAILBOX_BCAST so the caller
// can later tell smp_finish_message_processing() where the msg came from.
// Returns NULL if there is nothing to do.
static struct smp_msg *smp_check_for_message(int curr_cpu, int *source_mailbox)
{
	struct smp_msg *msg;

	acquire_spinlock_nocheck(&cpu_msg_spinlock[curr_cpu]);
	msg = smp_msgs[curr_cpu];
	if(msg != NULL) {
		// pop it off the local mailbox
		smp_msgs[curr_cpu] = msg->next;
		release_spinlock(&cpu_msg_spinlock[curr_cpu]);
//		dprintf(" found msg 0x%x in cpu mailbox\n", msg);
		*source_mailbox = MAILBOX_LOCAL;
	} else {
		// try getting one from the broadcast mailbox

		release_spinlock(&cpu_msg_spinlock[curr_cpu]);
		acquire_spinlock_nocheck(&broadcast_msg_spinlock);

		msg = smp_broadcast_msgs;
		while(msg != NULL) {
			if(CHECK_BIT(msg->proc_bitmap, curr_cpu) != 0) {
				// we have handled this one already
				msg = msg->next;
				continue;
			}

			// mark it so we wont try to process this one again
			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, curr_cpu);
			*source_mailbox = MAILBOX_BCAST;
			break;
		}
		release_spinlock(&broadcast_msg_spinlock);
//		dprintf(" found msg 0x%x in broadcast mailbox\n", msg);
	}
	return msg;
}
// Drop this cpu's reference on msg after processing it. Whichever cpu
// drops the LAST reference unlinks the message from its mailbox, frees
// the optional data_ptr payload, and then either returns the message to
// the free pool (async) or just sets msg->done (SYNC -- the sender still
// owns the message and will free it).
static void smp_finish_message_processing(int curr_cpu, struct smp_msg *msg, int source_mailbox)
{
	int old_refcount;

	old_refcount = atomic_add(&msg->ref_count, -1);
	if(old_refcount == 1) {
		// we were the last one to decrement the ref_count
		// it's our job to remove it from the list & possibly clean it up
		struct smp_msg **mbox = NULL;
		spinlock_t *spinlock = NULL;

		// clean up the message from one of the mailboxes
		switch(source_mailbox) {
			case MAILBOX_BCAST:
				mbox = &smp_broadcast_msgs;
				spinlock = &broadcast_msg_spinlock;
				break;
			case MAILBOX_LOCAL:
				mbox = &smp_msgs[curr_cpu];
				spinlock = &cpu_msg_spinlock[curr_cpu];
				break;
		}

		acquire_spinlock_nocheck(spinlock);

//		dprintf("cleaning up message 0x%x\n", msg);

		if(msg == *mbox) {
			(*mbox) = msg->next;
		} else {
			// we need to walk to find the message in the list.
			// we can't use any data found when previously walking through
			// the list, since the list may have changed. But, we are guaranteed
			// to at least have msg in it.
			struct smp_msg *last = NULL;
			struct smp_msg *msg1;

			msg1 = *mbox;
			while(msg1 != NULL && msg1 != msg) {
				last = msg1;
				msg1 = msg1->next;
			}

			// by definition, last must be something
			if(msg1 == msg && last != NULL) {
				last->next = msg->next;
			} else {
				dprintf("last == NULL or msg != msg1!!!\n");
			}
		}

		release_spinlock(spinlock);

		// free the optional payload; ownership ends with the last processor
		if(msg->data_ptr != NULL)
			kfree(msg->data_ptr);

		if(msg->flags == SMP_MSG_FLAG_SYNC) {
			msg->done = true;
			// the caller cpu should now free the message
		} else {
			// in the !SYNC case, we get to free the message
			return_free_message(msg);
		}
	}
}
// Pull one pending inter-cpu message addressed to curr_cpu and act on it.
// Returns INT_RESCHEDULE if the message requests a reschedule, otherwise
// INT_NO_RESCHEDULE (also when no message is pending). Never returns for
// SMP_MSG_CPU_HALT: the message is acked first, then the cpu spins forever
// with interrupts off.
static int smp_process_pending_ici(int curr_cpu)
{
	struct smp_msg *msg;
	bool halt = false;
	int source_mailbox = 0;
	int retval = INT_NO_RESCHEDULE;

	msg = smp_check_for_message(curr_cpu, &source_mailbox);
	if(msg == NULL)
		return retval;

//	dprintf("  message = %d\n", msg->message);
	switch(msg->message) {
		case SMP_MSG_INVL_PAGE_RANGE:
			arch_cpu_invalidate_TLB_range((addr_t)msg->data, (addr_t)msg->data2);
			break;
		case SMP_MSG_INVL_PAGE_LIST:
			arch_cpu_invalidate_TLB_list((addr_t *)msg->data, (int)msg->data2);
			break;
		case SMP_MSG_GLOBAL_INVL_PAGE:
			arch_cpu_global_TLB_invalidate();
			break;
		case SMP_MSG_RESCHEDULE:
			retval = INT_RESCHEDULE;
			break;
		case SMP_MSG_CPU_HALT:
			// defer the actual halt until the message has been acked below
			halt = true;
			dprintf("cpu %d halted!\n", curr_cpu);
			break;
		case SMP_MSG_1:
		default:
			dprintf("smp_intercpu_int_handler: got unknown message %d\n", msg->message);
	}

	// finish dealing with this message, possibly removing it from the list
	smp_finish_message_processing(curr_cpu, msg, source_mailbox);

	// special case for the halt message
	// we otherwise wouldn't have gotten the opportunity to clean up
	if(halt) {
		int_disable_interrupts();
		for(;;);
	}

	return retval;
}
282 void smp_send_ici(int target_cpu, int message, unsigned long data, unsigned long data2, unsigned long data3, void *data_ptr, int flags)
284 struct smp_msg *msg;
286 // dprintf("smp_send_ici: target 0x%x, mess 0x%x, data 0x%x, data2 0x%x, data3 0x%x, ptr 0x%x, flags 0x%x\n",
287 // target_cpu, message, data, data2, data3, data_ptr, flags);
289 if(ici_enabled) {
290 int curr_cpu;
292 // find_free_message leaves interrupts disabled
293 find_free_message(&msg);
295 curr_cpu = smp_get_current_cpu();
296 if(target_cpu == curr_cpu) {
297 return_free_message(msg);
298 int_restore_interrupts();
299 return; // nope, cant do that
302 // set up the message
303 msg->message = message;
304 msg->data = data;
305 msg->data = data2;
306 msg->data = data3;
307 msg->data_ptr = data_ptr;
308 msg->ref_count = 1;
309 msg->flags = flags;
310 msg->done = false;
312 // stick it in the appropriate cpu's mailbox
313 acquire_spinlock_nocheck(&cpu_msg_spinlock[target_cpu]);
314 msg->next = smp_msgs[target_cpu];
315 smp_msgs[target_cpu] = msg;
316 release_spinlock(&cpu_msg_spinlock[target_cpu]);
318 arch_smp_send_ici(target_cpu);
320 if(flags == SMP_MSG_FLAG_SYNC) {
321 // wait for the other cpu to finish processing it
322 // the interrupt handler will ref count it to <0
323 // if the message is sync after it has removed it from the mailbox
324 while(msg->done == false)
325 smp_process_pending_ici(curr_cpu);
326 // for SYNC messages, it's our responsibility to put it
327 // back into the free list
328 return_free_message(msg);
331 int_restore_interrupts();
// Send an inter-cpu message to every OTHER cpu via the broadcast mailbox.
// ref_count is set to smp_num_cpus - 1 and the sender pre-marks itself in
// proc_bitmap so it never processes its own broadcast. SYNC semantics are
// the same as smp_send_ici(): spin (servicing ICIs) until all targets are
// done, then return the message to the pool. No-op unless ICIs are enabled.
void smp_send_broadcast_ici(int message, unsigned long data, unsigned long data2, unsigned long data3, void *data_ptr, int flags)
{
	struct smp_msg *msg;

//	dprintf("smp_send_broadcast_ici: cpu %d mess 0x%x, data 0x%x, data2 0x%x, data3 0x%x, ptr 0x%x, flags 0x%x\n",
//		smp_get_current_cpu(), message, data, data2, data3, data_ptr, flags);

	if(ici_enabled) {
		int curr_cpu;

		// find_free_message leaves interrupts disabled
		find_free_message(&msg);

		curr_cpu = smp_get_current_cpu();

		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = data_ptr;
		msg->ref_count = smp_num_cpus - 1;
		msg->flags = flags;
		msg->proc_bitmap = SET_BIT(0, curr_cpu);	// never process our own broadcast
		msg->done = false;

//		dprintf("smp_send_broadcast_ici%d: inserting msg 0x%x into broadcast mbox\n", smp_get_current_cpu(), msg);

		// stick it in the appropriate cpu's mailbox
		acquire_spinlock_nocheck(&broadcast_msg_spinlock);
		msg->next = smp_broadcast_msgs;
		smp_broadcast_msgs = msg;
		release_spinlock(&broadcast_msg_spinlock);

		arch_smp_send_broadcast_ici();

//		dprintf("smp_send_broadcast_ici: sent interrupt\n");

		if(flags == SMP_MSG_FLAG_SYNC) {
			// wait for the other cpus to finish processing it
			// the interrupt handler will ref count it to <0
			// if the message is sync after it has removed it from the mailbox
//			dprintf("smp_send_broadcast_ici: waiting for ack\n");
			while(msg->done == false)
				smp_process_pending_ici(curr_cpu);
//			dprintf("smp_send_broadcast_ici: returning message to free list\n");
			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		int_restore_interrupts();
	}
//	dprintf("smp_send_broadcast_ici: done\n");
}
// Park application-processor cpus during early boot. The boot cpu (cpu 0)
// returns 0 immediately; every other cpu sets its boot_cpu_spin slot to 1
// (which smp_wait_for_ap_cpus() polls for) and then spins on that slot
// until smp_wake_up_all_non_boot_cpus() releases it, returning 1.
// NOTE(review): 'ka' is unused here -- presumably kept for call-site
// symmetry with the other smp_* boot hooks.
int smp_trap_non_boot_cpus(kernel_args *ka, int cpu)
{
	if(cpu > 0) {
		boot_cpu_spin[cpu] = 1;
		acquire_spinlock(&boot_cpu_spin[cpu]);
		return 1;
	} else {
		return 0;
	}
}
401 void smp_wake_up_all_non_boot_cpus()
403 int i;
404 for(i=1; i < smp_num_cpus; i++) {
405 release_spinlock(&boot_cpu_spin[i]);
// Busy-wait on the boot cpu until every AP cpu has checked in, i.e. has
// set its boot_cpu_spin slot to 1 in smp_trap_non_boot_cpus().
void smp_wait_for_ap_cpus(kernel_args *ka)
{
	unsigned int i;
	int retry;
	do {
		retry = 0;
		for(i=1; i < ka->num_cpus; i++) {
			if(boot_cpu_spin[i] != 1)
				retry = 1;
		}
	} while(retry == 1);
}
// Record the cpu count discovered at boot. Everything in this file keys
// off smp_num_cpus (spinlocks become no-ops when it is 1).
void smp_set_num_cpus(int num_cpus)
{
	smp_num_cpus = num_cpus;
}
427 int smp_get_num_cpus()
429 return smp_num_cpus;
// Return the number of the cpu the calling thread is running on.
// Early in boot there is no current thread yet; report cpu 0 in that case
// (only the boot cpu runs before threading is up).
int smp_get_current_cpu(void)
{
	struct thread *t = thread_get_current_thread();
	if(t)
		return t->cpu->cpu_num;
	else
		return 0;
}
441 int smp_enable_ici()
443 if(smp_num_cpus > 1) // dont actually do it if we only have one cpu
444 ici_enabled = true;
445 return NO_ERROR;
448 int smp_disable_ici()
450 ici_enabled = false;
451 return NO_ERROR;
453 #endif
// Interrupt handler for the inter-cpu interrupt: process one pending
// message for this cpu and propagate its reschedule request
// (INT_RESCHEDULE / INT_NO_RESCHEDULE) back to the interrupt core.
// Compiled to a trivial stub when SMP support is configured out.
int smp_intercpu_int_handler(void)
{
#if _WITH_SMP
	int retval;
	int curr_cpu = smp_get_current_cpu();

//	dprintf("smp_intercpu_int_handler: entry on cpu %d\n", curr_cpu);

	retval = smp_process_pending_ici(curr_cpu);

//	dprintf("smp_intercpu_int_handler: done\n");

	return retval;
#else
	return INT_NO_RESCHEDULE;
#endif
}
// Boot-time SMP initialization: on a multi-cpu system, preallocate the
// pool of MSG_POOL_SIZE smp_msg structures onto the free list and record
// the cpu count, then hand off to the architecture layer. Returns the
// arch_smp_init() result, or ERR_GENERAL on allocation failure (after
// panicking -- NOTE(review): the return is only reached if panic returns).
int smp_init(kernel_args *ka)
{
#if _WITH_SMP
	struct smp_msg *msg;
	int i;

	dprintf("smp_init: entry\n");
	kprintf("initializing smp mailboxes...\n");

	if(ka->num_cpus > 1) {
		free_msgs = NULL;
		free_msg_count = 0;
		for(i=0; i<MSG_POOL_SIZE; i++) {
			msg = (struct smp_msg *)kmalloc(sizeof(struct smp_msg));
			if(msg == NULL) {
				panic("error creating smp mailboxes\n");
				return ERR_GENERAL;
			}
			memset(msg, 0, sizeof(struct smp_msg));
			// push onto the free pool
			msg->next = free_msgs;
			free_msgs = msg;
			free_msg_count++;
		}
		smp_num_cpus = ka->num_cpus;
	}
#endif
	dprintf("smp_init: calling arch_smp_init\n");
	return arch_smp_init(ka);
}
// Per-cpu SMP initialization; simply forwards to the architecture layer.
int smp_init_percpu(kernel_args *ka, int cpu_num)
{
	return arch_smp_init_percpu(ka, cpu_num);
}