/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
5 #include <kernel/kernel.h>
6 #include <kernel/thread.h>
7 #include <kernel/console.h>
8 #include <kernel/debug.h>
9 #include <kernel/int.h>
10 #include <kernel/smp_priv.h>
11 #include <kernel/smp.h>
12 #include <kernel/heap.h>
13 #include <newos/errors.h>
14 #include <kernel/cpu.h>
16 #include <kernel/arch/smp.h>
20 // without smp code, there's no point compiling this stuff
22 #define MSG_POOL_SIZE (_MAX_CPUS * 4)
34 unsigned int proc_bitmap
;
43 static spinlock_t boot_cpu_spin
[_MAX_CPUS
] = { 0, };
45 static struct smp_msg
*free_msgs
= NULL
;
46 static volatile int free_msg_count
= 0;
47 static spinlock_t free_msg_spinlock
= 0;
49 static struct smp_msg
*smp_msgs
[_MAX_CPUS
] = { NULL
, };
50 static spinlock_t cpu_msg_spinlock
[_MAX_CPUS
] = { 0, };
52 static struct smp_msg
*smp_broadcast_msgs
= NULL
;
53 static spinlock_t broadcast_msg_spinlock
= 0;
55 static bool ici_enabled
= false;
57 static int smp_num_cpus
= 1;
59 static int smp_process_pending_ici(int curr_cpu
);
61 void acquire_spinlock(spinlock_t
*lock
)
63 if(smp_num_cpus
> 1) {
64 int curr_cpu
= smp_get_current_cpu();
65 if(int_are_interrupts_enabled())
66 panic("acquire_spinlock: attempt to acquire lock %p with interrupts enabled\n", lock
);
69 smp_process_pending_ici(curr_cpu
);
70 if(atomic_set(lock
, 1) == 0)
76 static void acquire_spinlock_nocheck(spinlock_t
*lock
)
78 if(smp_num_cpus
> 1) {
82 if(atomic_set(lock
, 1) == 0)
88 void release_spinlock(spinlock_t
*lock
)
93 // finds a free message and gets it
94 // NOTE: has side effect of disabling interrupts
95 static void find_free_message(struct smp_msg
**msg
)
98 // dprintf("find_free_message: entry\n");
101 while(free_msg_count
<= 0)
103 int_disable_interrupts();
104 acquire_spinlock(&free_msg_spinlock
);
106 if(free_msg_count
<= 0) {
107 // someone grabbed one while we were getting the lock,
108 // go back to waiting for it
109 release_spinlock(&free_msg_spinlock
);
110 int_restore_interrupts();
115 free_msgs
= (*msg
)->next
;
118 release_spinlock(&free_msg_spinlock
);
120 // dprintf("find_free_message: returning msg 0x%x\n", *msg);
123 static void return_free_message(struct smp_msg
*msg
)
125 // dprintf("return_free_message: returning msg 0x%x\n", msg);
126 acquire_spinlock_nocheck(&free_msg_spinlock
);
127 msg
->next
= free_msgs
;
130 release_spinlock(&free_msg_spinlock
);
133 static struct smp_msg
*smp_check_for_message(int curr_cpu
, int *source_mailbox
)
137 acquire_spinlock_nocheck(&cpu_msg_spinlock
[curr_cpu
]);
138 msg
= smp_msgs
[curr_cpu
];
140 smp_msgs
[curr_cpu
] = msg
->next
;
141 release_spinlock(&cpu_msg_spinlock
[curr_cpu
]);
142 // dprintf(" found msg 0x%x in cpu mailbox\n", msg);
143 *source_mailbox
= MAILBOX_LOCAL
;
145 // try getting one from the broadcast mailbox
147 release_spinlock(&cpu_msg_spinlock
[curr_cpu
]);
148 acquire_spinlock_nocheck(&broadcast_msg_spinlock
);
150 msg
= smp_broadcast_msgs
;
152 if(CHECK_BIT(msg
->proc_bitmap
, curr_cpu
) != 0) {
153 // we have handled this one already
158 // mark it so we wont try to process this one again
159 msg
->proc_bitmap
= SET_BIT(msg
->proc_bitmap
, curr_cpu
);
160 *source_mailbox
= MAILBOX_BCAST
;
163 release_spinlock(&broadcast_msg_spinlock
);
164 // dprintf(" found msg 0x%x in broadcast mailbox\n", msg);
169 static void smp_finish_message_processing(int curr_cpu
, struct smp_msg
*msg
, int source_mailbox
)
173 old_refcount
= atomic_add(&msg
->ref_count
, -1);
174 if(old_refcount
== 1) {
175 // we were the last one to decrement the ref_count
176 // it's our job to remove it from the list & possibly clean it up
177 struct smp_msg
**mbox
= NULL
;
178 spinlock_t
*spinlock
= NULL
;
180 // clean up the message from one of the mailboxes
181 switch(source_mailbox
) {
183 mbox
= &smp_broadcast_msgs
;
184 spinlock
= &broadcast_msg_spinlock
;
187 mbox
= &smp_msgs
[curr_cpu
];
188 spinlock
= &cpu_msg_spinlock
[curr_cpu
];
192 acquire_spinlock_nocheck(spinlock
);
194 // dprintf("cleaning up message 0x%x\n", msg);
199 // we need to walk to find the message in the list.
200 // we can't use any data found when previously walking through
201 // the list, since the list may have changed. But, we are guaranteed
202 // to at least have msg in it.
203 struct smp_msg
*last
= NULL
;
204 struct smp_msg
*msg1
;
207 while(msg1
!= NULL
&& msg1
!= msg
) {
212 // by definition, last must be something
213 if(msg1
== msg
&& last
!= NULL
) {
214 last
->next
= msg
->next
;
216 dprintf("last == NULL or msg != msg1!!!\n");
220 release_spinlock(spinlock
);
222 if(msg
->data_ptr
!= NULL
)
223 kfree(msg
->data_ptr
);
225 if(msg
->flags
== SMP_MSG_FLAG_SYNC
) {
227 // the caller cpu should now free the message
229 // in the !SYNC case, we get to free the message
230 return_free_message(msg
);
235 static int smp_process_pending_ici(int curr_cpu
)
239 int source_mailbox
= 0;
240 int retval
= INT_NO_RESCHEDULE
;
242 msg
= smp_check_for_message(curr_cpu
, &source_mailbox
);
246 // dprintf(" message = %d\n", msg->message);
247 switch(msg
->message
) {
248 case SMP_MSG_INVL_PAGE_RANGE
:
249 arch_cpu_invalidate_TLB_range((addr_t
)msg
->data
, (addr_t
)msg
->data2
);
251 case SMP_MSG_INVL_PAGE_LIST
:
252 arch_cpu_invalidate_TLB_list((addr_t
*)msg
->data
, (int)msg
->data2
);
254 case SMP_MSG_GLOBAL_INVL_PAGE
:
255 arch_cpu_global_TLB_invalidate();
257 case SMP_MSG_RESCHEDULE
:
258 retval
= INT_RESCHEDULE
;
260 case SMP_MSG_CPU_HALT
:
262 dprintf("cpu %d halted!\n", curr_cpu
);
266 dprintf("smp_intercpu_int_handler: got unknown message %d\n", msg
->message
);
269 // finish dealing with this message, possibly removing it from the list
270 smp_finish_message_processing(curr_cpu
, msg
, source_mailbox
);
272 // special case for the halt message
273 // we otherwise wouldn't have gotten the opportunity to clean up
275 int_disable_interrupts();
282 void smp_send_ici(int target_cpu
, int message
, unsigned long data
, unsigned long data2
, unsigned long data3
, void *data_ptr
, int flags
)
286 // dprintf("smp_send_ici: target 0x%x, mess 0x%x, data 0x%x, data2 0x%x, data3 0x%x, ptr 0x%x, flags 0x%x\n",
287 // target_cpu, message, data, data2, data3, data_ptr, flags);
292 // find_free_message leaves interrupts disabled
293 find_free_message(&msg
);
295 curr_cpu
= smp_get_current_cpu();
296 if(target_cpu
== curr_cpu
) {
297 return_free_message(msg
);
298 int_restore_interrupts();
299 return; // nope, cant do that
302 // set up the message
303 msg
->message
= message
;
307 msg
->data_ptr
= data_ptr
;
312 // stick it in the appropriate cpu's mailbox
313 acquire_spinlock_nocheck(&cpu_msg_spinlock
[target_cpu
]);
314 msg
->next
= smp_msgs
[target_cpu
];
315 smp_msgs
[target_cpu
] = msg
;
316 release_spinlock(&cpu_msg_spinlock
[target_cpu
]);
318 arch_smp_send_ici(target_cpu
);
320 if(flags
== SMP_MSG_FLAG_SYNC
) {
321 // wait for the other cpu to finish processing it
322 // the interrupt handler will ref count it to <0
323 // if the message is sync after it has removed it from the mailbox
324 while(msg
->done
== false)
325 smp_process_pending_ici(curr_cpu
);
326 // for SYNC messages, it's our responsibility to put it
327 // back into the free list
328 return_free_message(msg
);
331 int_restore_interrupts();
335 void smp_send_broadcast_ici(int message
, unsigned long data
, unsigned long data2
, unsigned long data3
, void *data_ptr
, int flags
)
339 // dprintf("smp_send_broadcast_ici: cpu %d mess 0x%x, data 0x%x, data2 0x%x, data3 0x%x, ptr 0x%x, flags 0x%x\n",
340 // smp_get_current_cpu(), message, data, data2, data3, data_ptr, flags);
345 // find_free_message leaves interrupts disabled
346 find_free_message(&msg
);
348 curr_cpu
= smp_get_current_cpu();
350 msg
->message
= message
;
354 msg
->data_ptr
= data_ptr
;
355 msg
->ref_count
= smp_num_cpus
- 1;
357 msg
->proc_bitmap
= SET_BIT(0, curr_cpu
);
360 // dprintf("smp_send_broadcast_ici%d: inserting msg 0x%x into broadcast mbox\n", smp_get_current_cpu(), msg);
362 // stick it in the appropriate cpu's mailbox
363 acquire_spinlock_nocheck(&broadcast_msg_spinlock
);
364 msg
->next
= smp_broadcast_msgs
;
365 smp_broadcast_msgs
= msg
;
366 release_spinlock(&broadcast_msg_spinlock
);
368 arch_smp_send_broadcast_ici();
370 // dprintf("smp_send_broadcast_ici: sent interrupt\n");
372 if(flags
== SMP_MSG_FLAG_SYNC
) {
373 // wait for the other cpus to finish processing it
374 // the interrupt handler will ref count it to <0
375 // if the message is sync after it has removed it from the mailbox
376 // dprintf("smp_send_broadcast_ici: waiting for ack\n");
377 while(msg
->done
== false)
378 smp_process_pending_ici(curr_cpu
);
379 // dprintf("smp_send_broadcast_ici: returning message to free list\n");
380 // for SYNC messages, it's our responsibility to put it
381 // back into the free list
382 return_free_message(msg
);
385 int_restore_interrupts();
387 // dprintf("smp_send_broadcast_ici: done\n");
390 int smp_trap_non_boot_cpus(kernel_args
*ka
, int cpu
)
393 boot_cpu_spin
[cpu
] = 1;
394 acquire_spinlock(&boot_cpu_spin
[cpu
]);
401 void smp_wake_up_all_non_boot_cpus()
404 for(i
=1; i
< smp_num_cpus
; i
++) {
405 release_spinlock(&boot_cpu_spin
[i
]);
409 void smp_wait_for_ap_cpus(kernel_args
*ka
)
415 for(i
=1; i
< ka
->num_cpus
; i
++) {
416 if(boot_cpu_spin
[i
] != 1)
422 void smp_set_num_cpus(int num_cpus
)
424 smp_num_cpus
= num_cpus
;
427 int smp_get_num_cpus()
432 int smp_get_current_cpu(void)
434 struct thread
*t
= thread_get_current_thread();
436 return t
->cpu
->cpu_num
;
443 if(smp_num_cpus
> 1) // dont actually do it if we only have one cpu
448 int smp_disable_ici()
455 int smp_intercpu_int_handler(void)
459 int curr_cpu
= smp_get_current_cpu();
461 // dprintf("smp_intercpu_int_handler: entry on cpu %d\n", curr_cpu);
463 retval
= smp_process_pending_ici(curr_cpu
);
465 // dprintf("smp_intercpu_int_handler: done\n");
469 return INT_NO_RESCHEDULE
;
473 int smp_init(kernel_args
*ka
)
479 dprintf("smp_init: entry\n");
480 kprintf("initializing smp mailboxes...\n");
482 if(ka
->num_cpus
> 1) {
485 for(i
=0; i
<MSG_POOL_SIZE
; i
++) {
486 msg
= (struct smp_msg
*)kmalloc(sizeof(struct smp_msg
));
488 panic("error creating smp mailboxes\n");
491 memset(msg
, 0, sizeof(struct smp_msg
));
492 msg
->next
= free_msgs
;
496 smp_num_cpus
= ka
->num_cpus
;
499 dprintf("smp_init: calling arch_smp_init\n");
500 return arch_smp_init(ka
);
503 int smp_init_percpu(kernel_args
*ka
, int cpu_num
)
505 return arch_smp_init_percpu(ka
, cpu_num
);