/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#ifdef SIMULATOR
#include "system-sdl.h"
#include "debug.h"
#endif
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
static int num_tick_funcs = 0;

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}

/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();

    /* Add a task if there is room */
    if(num_tick_funcs < MAX_NUM_TICK_TASKS)
    {
        tick_funcs[num_tick_funcs++] = f;
        restore_irq(oldlevel);
        return 0;
    }

    restore_irq(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Remove a task if it is there */
    for(i = 0;i < num_tick_funcs;i++)
    {
        if(tick_funcs[i] == f)
        {
            /* Compact function list - propagates NULL-terminator as well */
            for(; i < num_tick_funcs; i++)
                tick_funcs[i] = tick_funcs[i+1];

            num_tick_funcs--;

            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    return -1;
}

/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
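
/* Example usage (an illustrative sketch for INCLUDE_TIMEOUT_API builds; the
 * callback and object names are hypothetical). Returning nonzero from the
 * callback reloads the timeout with ->ticks; returning zero cancels it: */
#if 0
static struct timeout example_tmo;

static int example_timeout_cb(struct timeout *tmo)
{
    /* one-shot: do the work once, then return 0 so it is cancelled */
    (void)tmo->data;
    return 0;
}

static void example_timeout_usage(void)
{
    /* fire roughly one second from now; re-registering resets the interval */
    timeout_register(&example_tmo, example_timeout_cb, HZ, 0);
    /* timeout_cancel(&example_tmo); would stop it early, even from an ISR */
}
#endif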

/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    /* hacky.. */
    long sleep_ticks = current_tick + ticks + 1;
    while (sleep_ticks > current_tick)
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                                 rd                 wr
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                                 \/     \/            \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:   /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
    thread->wakeup_ext_cb = NULL;
    thread->retval = 0;
}

/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this. An official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}

/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(UNLIKELY(*spp))
        {
            queue_release_sender(spp, 0);
        }
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}

/* Moves the waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow, which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == cores[CURRENT_CORE].running,
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == cores[CURRENT_CORE].running,
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
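
/* Example usage (an illustrative sketch; the queue, event ID and thread body
 * are hypothetical). A typical owner thread blocks in queue_wait while other
 * threads or ISRs post events to it: */
#if 0
static struct event_queue example_q;
#define EXAMPLE_EV_DO_WORK 1

static void example_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true); /* register so queue_broadcast reaches it */

    while(1)
    {
        queue_wait(&example_q, &ev); /* blocks until something is posted */

        switch(ev.id)
        {
        case EXAMPLE_EV_DO_WORK:
            /* ev.data carries the intptr_t payload from the poster */
            break;
        }
    }
}

/* elsewhere, possibly from an ISR: */
static void example_poster(void)
{
    queue_post(&example_q, EXAMPLE_EV_DO_WORK, 0);
}
#endif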

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue's integrity by disabling them. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = cores[CURRENT_CORE].running;

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
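
/* Example usage (an illustrative sketch; all names are hypothetical). With
 * queue_enable_queue_send the owning thread can answer synchronous messages
 * via queue_reply, and queue_send blocks until that reply arrives: */
#if 0
static struct event_queue example_sq;
static struct queue_sender_list example_sq_send;
#define EXAMPLE_EV_QUERY 1

static void example_owner_thread(void)
{
    struct queue_event ev;

    queue_init(&example_sq, true);
    /* 0 = no official owner; pass a thread id for priority inheritance */
    queue_enable_queue_send(&example_sq, &example_sq_send, 0);

    while(1)
    {
        queue_wait(&example_sq, &ev);

        if(ev.id == EXAMPLE_EV_QUERY)
            queue_reply(&example_sq, 42); /* wakes the blocked sender */
        /* without an explicit reply, the next queue_wait auto-replies 0 */
    }
}

static void example_client(void)
{
    intptr_t answer = queue_send(&example_sq, EXAMPLE_EV_QUERY, 0);
    (void)answer; /* 42, or 0 on an auto/default reply */
}
#endif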

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q The event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return i;
}

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = 0;
    MUTEX_SET_THREAD(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(current == MUTEX_GET_THREAD(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(LIKELY(m->locked == 0))
    {
        /* lock is open */
        MUTEX_SET_THREAD(m, current);
        m->locked = 1;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release ownership of a mutex object - only the owning thread may call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == cores[CURRENT_CORE].running,
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  cores[CURRENT_CORE].running->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        MUTEX_SET_THREAD(m, NULL);
        m->locked = 0;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
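
/* Example usage (an illustrative sketch; names are hypothetical). The mutex
 * is recursive for the owning thread - each mutex_lock needs a matching
 * mutex_unlock: */
#if 0
static struct mutex example_mtx;
static int example_shared_state;

static void example_mutex_usage(void)
{
    mutex_init(&example_mtx); /* once, before other threads can see it */

    mutex_lock(&example_mtx);   /* blocks if another thread owns it */
    example_shared_state++;     /* critical section */
    mutex_unlock(&example_mtx); /* wakes the next queued thread, if any */
}
#endif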

/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(l->thread == current)
    {
        /* current core already owns it */
        l->count++;
        return;
    }

    /* lock against other processor cores */
    corelock_lock(&l->cl);

    /* take ownership */
    l->thread = current;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
                  "spinlock_unlock->wrong thread\n");

    if(l->count > 0)
    {
        /* this core still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    /* release lock */
    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
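
/* Example usage (an illustrative sketch; names are hypothetical). Spinlocks
 * only exist on multicore builds and guard very short cross-core critical
 * sections without putting the thread to sleep: */
#if 0
#if NUM_CORES > 1
static struct spinlock example_sl;

static void example_spinlock_usage(void)
{
    spinlock_init(&example_sl);

    spinlock_lock(&example_sl);   /* spins until the other core releases */
    /* very short cross-core critical section */
    spinlock_unlock(&example_sl);
}
#endif
#endif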

/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
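
/* Example usage (an illustrative sketch; names are hypothetical). A classic
 * use is counting completed transfers: the consumer waits, the completion
 * handler releases: */
#if 0
static struct semaphore example_sem;

static void example_semaphore_usage(void)
{
    /* at most 1 outstanding release, start at 0 so the first wait blocks */
    semaphore_init(&example_sem, 1, 0);

    semaphore_wait(&example_sem); /* blocks until released elsewhere */
}

static void example_completion_handler(void)
{
    semaphore_release(&example_sem); /* wakes one waiting thread */
}
#endif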

#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = 0;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(w->signalled == 0 && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(w->signalled == 0))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = 0; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = 1;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
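
/* Example usage (an illustrative sketch; names are hypothetical). Per the
 * section header above, the wakeup object is IRQ-compatible: an ISR can
 * signal it, and the signal is latched for a thread that has not yet started
 * waiting: */
#if 0
static struct wakeup example_wk;

static void example_isr(void)
{
    wakeup_signal(&example_wk); /* latch the signal / wake the waiter */
}

static void example_waiter_thread(void)
{
    wakeup_init(&example_wk); /* once, before the ISR can fire */

    if(wakeup_wait(&example_wk, HZ) == OBJ_WAIT_SUCCEEDED)
    {
        /* signalled within roughly one second */
    }
}
#endif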