/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#include "debug.h"

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

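/* KERNEL_ASSERT takes a condition plus printf-style arguments, so callers can
 * embed runtime values in the failure message. Illustrative sketch only (the
 * object and message below are made up):
 *
 *     KERNEL_ASSERT(q->read != q->write, "queue %p unexpectedly empty\n", q);
 *
 * In simulator builds a failure prints the message and exits, in DEBUG target
 * builds it calls panicf(), and when checks are disabled it compiles away. */
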
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

/* This array holds all queues that are registered. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;

/****************************************************************************
 * Common utilities
 ****************************************************************************/

/* Find a pointer in a pointer array. Returns the address of the element if
 * found or the address of the terminating NULL otherwise. */
static void ** find_array_ptr(void **arr, void *ptr)
{
    void *curr;
    for(curr = *arr; curr != NULL && curr != ptr; curr = *(++arr));
    return arr;
}

/* Remove a pointer from a pointer array if it exists. Compacts it so that
 * no gaps exist. Returns 0 on success and -1 if the element wasn't found. */
static int remove_array_ptr(void **arr, void *ptr)
{
    void *curr;
    arr = find_array_ptr(arr, ptr);

    if(*arr == NULL)
        return -1;

    /* Found. Slide up following items. */
    do
    {
        void **arr1 = arr + 1;
        *arr++ = curr = *arr1;
    }
    while(curr != NULL);

    return 0;
}

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}

/****************************************************************************
 * Timer tick - the timer initialization and interrupt handler are defined
 * at the target level.
 ****************************************************************************/
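/* Usage sketch (the task name below is made up): a tick task is a void(void)
 * function called by the target's tick interrupt once per kernel tick, so it
 * must be short and non-blocking:
 *
 *     static void battery_tick(void)
 *     {
 *         // runs in interrupt context every 1/HZ seconds
 *     }
 *
 *     tick_add_task(battery_tick);     // returns 0; panics if table is full
 *     ...
 *     tick_remove_task(battery_tick);  // returns 0 on success, -1 if absent
 */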
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}

/****************************************************************************
 * Tick-based interval timers/one-shots - note that these are not really
 * intended for continuous timers but for events that need to run for a
 * short time and be cancelled without further software intervention.
 ****************************************************************************/
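/* Usage sketch, assuming INCLUDE_TIMEOUT_API (the names below are made up).
 * The callback runs from the timeout tick task; it returns a positive tick
 * count to be rescheduled that far in the future, or <= 0 to stop:
 *
 *     static struct timeout debounce_tmo;
 *
 *     static int debounce_cb(struct timeout *tmo)
 *     {
 *         handle_debounce((int)tmo->data);
 *         return 0;              // one-shot: do not reload
 *     }
 *
 *     timeout_register(&debounce_tmo, debounce_cb, HZ/10, 0);
 *     timeout_cancel(&debounce_tmo);   // safe even from an ISR
 */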
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tmo_list;
    int rc = remove_array_ptr(arr, tmo);

    if(rc >= 0 && *arr == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*arr == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */

/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    /* hacky.. */
    long sleep_ticks = current_tick + ticks + 1;
    while (TIME_BEFORE(current_tick, sleep_ticks))
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((defined(TATUNG_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                            rd                                 wr
 * q->events[]:         | XX |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:  | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                               \/     \/            \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}

/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory but must be specified for
 * priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}

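/* Synchronous send/receive sketch (the queue, event ID and owner id below are
 * illustrative, not part of this file). The owning thread enables sending
 * once, a client blocks in queue_send() until the owner replies, and
 * dequeuing the next message without an explicit reply auto-replies 0:
 *
 *     // owner thread setup
 *     static struct event_queue q;
 *     static struct queue_sender_list q_send;
 *     queue_init(&q, true);
 *     queue_enable_queue_send(&q, &q_send, owner_thread_id);
 *
 *     // client thread
 *     intptr_t result = queue_send(&q, MY_REQUEST, 0);
 *
 *     // owner thread
 *     struct queue_event ev;
 *     queue_wait(&q, &ev);
 *     if(ev.id == MY_REQUEST)
 *         queue_reply(&q, 1);   // unblocks the client; queue_send returns 1
 */
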
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(UNLIKELY(*spp))
        {
            queue_release_sender(spp, 0);
        }
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}

/* Moves the waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}

#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        if(*p == NULL)
        {
            /* Add it to the all_queues array */
            *p = q;
            corelock_unlock(&all_queues.cl);
        }
    }

    restore_irq(oldlevel);
}

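/* Registration sketch (the queue name below is illustrative): passing
 * register_queue == true adds the queue to all_queues, so it also receives
 * events posted via queue_broadcast():
 *
 *     static struct event_queue button_queue;
 *     queue_init(&button_queue, true);  // broadcast events will reach it
 *     ...
 *     queue_delete(&button_queue);      // unregisters and wakes any waiters
 */
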
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

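/* Typical receive-loop sketch (the queue, event ID and handlers below are
 * illustrative). The owning thread blocks until a message arrives or the
 * timeout elapses, then dispatches on ev.id:
 *
 *     struct queue_event ev;
 *
 *     while(1)
 *     {
 *         queue_wait_w_tmo(&my_queue, &ev, HZ);
 *
 *         switch(ev.id)
 *         {
 *         case SYS_TIMEOUT:
 *             do_periodic_work();
 *             break;
 *         case MY_EVENT:
 *             handle_event(ev.data);
 *             break;
 *         }
 *     }
 */
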
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Poll queue to see if a message exists - be careful using the result if
 * queue_remove_from_head is called while messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case, or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
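/* Polling sketch related to the caveat above (the queue and handler names are
 * illustrative): a zero-tick queue_wait_w_tmo() checks and dequeues under the
 * queue lock, avoiding the race where queue_empty() sees a message that some
 * other path removes before we wait on it:
 *
 *     struct queue_event ev;
 *     queue_wait_w_tmo(&my_queue, &ev, 0);  // never blocks with 0 ticks
 *     if(ev.id != SYS_TIMEOUT)
 *         handle_event(&ev);
 */
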
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q pointer to the event_queue
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
    __attribute__((always_inline));
static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
    __attribute__((always_inline));
static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = false;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(LIKELY(!m->locked))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        m->locked = true;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release ownership of a mutex object - only the owning thread may call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        m->locked = false;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}

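/* Usage sketch (the protected resource below is illustrative). The mutex is
 * recursive for the owning thread - nested lock/unlock pairs are balanced via
 * the count field - and only the owner may unlock:
 *
 *     static struct mutex list_mutex;
 *     mutex_init(&list_mutex);  // once, before other threads can see it
 *
 *     mutex_lock(&list_mutex);
 *     // ...touch the shared data...
 *     mutex_unlock(&list_mutex);
 */
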
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */

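/* Usage sketch, assuming HAVE_SEMAPHORE_OBJECTS (the names below are
 * illustrative). A counting semaphore initialized with start == 0 makes a
 * simple "data ready" signal between a producer and a consumer thread:
 *
 *     static struct semaphore data_sem;
 *     semaphore_init(&data_sem, 8, 0);  // up to 8 pending signals, none yet
 *
 *     // producer
 *     semaphore_release(&data_sem);     // count++, wakes a waiter if any
 *
 *     // consumer
 *     semaphore_wait(&data_sem);        // count--, blocks when it goes < 0
 */
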
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = false;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(!w->signalled))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = false; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = true;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
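
/* Usage sketch, assuming HAVE_WAKEUP_OBJECTS (the ISR/thread split below is
 * illustrative). An interrupt handler signals the object and a single thread
 * waits on it; the return value tells the waiter whether it was signalled or
 * timed out:
 *
 *     static struct wakeup transfer_wakeup;
 *     wakeup_init(&transfer_wakeup);
 *
 *     // interrupt handler
 *     wakeup_signal(&transfer_wakeup);
 *
 *     // waiting thread
 *     if(wakeup_wait(&transfer_wakeup, HZ) != OBJ_WAIT_SUCCEEDED)
 *         handle_transfer_timeout();
 */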