AS3525: Implement a true audio pause and full-resolution audio tick. Take care of...
firmware/kernel.c
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#include "debug.h"
#include "general.h"

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
    defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

/* This array holds all queues that are initialized. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}

/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}

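/* Illustrative sketch (not part of the original file): how a driver might
 * register a function to run on every timer tick. The names
 * example_tick_counter, example_tick_task and example_tick_usage are
 * hypothetical. Tick tasks run in interrupt context, so they must be short
 * and must never block. */
#if 0
static volatile long example_tick_counter = 0;

static void example_tick_task(void)
{
    /* Called once per kernel tick (HZ times per second) */
    example_tick_counter++;
}

static void example_tick_usage(void)
{
    tick_add_task(example_tick_task);    /* start counting ticks      */
    sleep(HZ);                           /* let roughly HZ ticks pass */
    tick_remove_task(example_tick_task); /* stop counting             */
}
#endif
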
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*tmo_list == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */

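/* Illustrative sketch (not part of the original file): a timeout callback
 * that re-arms itself a few times by returning a reload value, then cancels
 * itself by returning 0. example_timeout, example_timeout_cb and
 * example_timeout_usage are hypothetical names; the callback runs from the
 * tick task in interrupt context, so it must not block. */
#if 0
static struct timeout example_timeout;

static int example_timeout_cb(struct timeout *tmo)
{
    if(tmo->data > 0)
    {
        tmo->data--;      /* run a few more times...           */
        return HZ/10;     /* ...reload: fire again in HZ/10 ticks */
    }
    return 0;             /* returning <= 0 cancels the timeout */
}

static void example_timeout_usage(void)
{
    /* First callback fires one second from now, with data == 5 */
    timeout_register(&example_timeout, example_timeout_cb, HZ, 5);
    /* timeout_cancel(&example_timeout) would stop it early */
}
#endif
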
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
unsigned sleep(unsigned ticks)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (SLEEP_KERNEL_HOOK(ticks))
        return 0; /* Handled */

    disable_irq();
    sleep_thread(ticks);
    switch_thread();
    return 0;
}

void yield(void)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (YIELD_KERNEL_HOOK())
        return; /* handled */

    switch_thread();
}

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                                 rd                          wr
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                                 \/     \/            \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * call.
 */
static void queue_release_sender(struct thread_entry * volatile * sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}

/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this. An official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}

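/* Illustrative sketch (not part of the original file): enabling synchronous
 * messaging on a queue owned by a single thread. A sender blocks in
 * queue_send() until the owner calls queue_reply(), or until the owner's
 * next queue_wait(), which auto-replies with 0. All example_* names and the
 * event id EXAMPLE_Q_FLUSH are hypothetical. */
#if 0
enum { EXAMPLE_Q_FLUSH = 1 };

static struct event_queue       example_q;
static struct queue_sender_list example_q_sender_list;

/* Owner thread side; example_owner_thread_id assumed obtained elsewhere */
static void example_owner_setup(unsigned int example_owner_thread_id)
{
    queue_init(&example_q, true);
    queue_enable_queue_send(&example_q, &example_q_sender_list,
                            example_owner_thread_id);
}

static void example_owner_loop(void)
{
    struct queue_event ev;
    while(1)
    {
        queue_wait(&example_q, &ev);    /* auto-replies to previous sender */
        if(ev.id == EXAMPLE_Q_FLUSH)
            queue_reply(&example_q, 1); /* explicit reply: sender gets 1   */
    }
}

/* Client thread side - blocks until the owner replies */
static void example_client(void)
{
    intptr_t ok = queue_send(&example_q, EXAMPLE_Q_FLUSH, 0);
    (void)ok;
}
#endif
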
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(UNLIKELY(*spp))
        {
            queue_release_sender(spp, 0);
        }
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}

/* Moves the waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow, which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    /* What garbage is in write is irrelevant because of the masking design -
     * any other functions that empty the queue do this as well, so that
     * queue_count and queue_empty return sane values in the case of a
     * concurrent change without locking inside them. */
    q->read = q->write;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        if(*p == NULL)
        {
            /* Add it to the all_queues array */
            *p = q;
            corelock_unlock(&all_queues.cl);
        }
    }

    restore_irq(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    while(1)
    {
        struct thread_entry *current;

        rd = q->read;
        if (rd != q->write) /* A waking message could disappear */
            break;

        current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    q->read = rd + 1;
    rd &= QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;
    unsigned int rd, wr;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    rd = q->read;
    wr = q->write;
    if (rd == wr && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        rd = q->read;
        wr = q->write;
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (rd != wr)
    {
        q->read = rd + 1;
        rd &= QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

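/* Illustrative sketch (not part of the original file): the typical
 * asynchronous pattern - one thread owns a queue and waits on it, while
 * other threads post events to it. example_ev_queue, the example_* functions
 * and the event ids are hypothetical. */
#if 0
enum { EXAMPLE_EV_REFRESH = 1, EXAMPLE_EV_QUIT };

static struct event_queue example_ev_queue;

static void example_consumer_thread(void)
{
    struct queue_event ev;

    queue_init(&example_ev_queue, true); /* register for queue_broadcast */

    while(1)
    {
        /* Wake up at least once per second even with no events pending */
        queue_wait_w_tmo(&example_ev_queue, &ev, HZ);

        switch(ev.id)
        {
        case EXAMPLE_EV_REFRESH:
            /* handle ev.data */
            break;
        case EXAMPLE_EV_QUIT:
            return;
        case SYS_TIMEOUT:
            /* periodic housekeeping */
            break;
        }
    }
}

/* Any other thread posts asynchronously and never blocks on a reply
 * (unlike queue_send below, which IRQ handlers must not call). */
static void example_producer(void)
{
    queue_post(&example_ev_queue, EXAMPLE_EV_REFRESH, 42);
}
#endif
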
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        struct queue_sender_list *sender;

        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        sender = q->send;

        /* Double-check locking */
        if(LIKELY(sender && sender->curr_sender))
            queue_release_sender(&sender->curr_sender, retval);

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;

    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    rd = q->read;
    if(rd != q->write)
    {
        *ev = q->events[rd & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q The event_queue to query
 * @return The number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->recursion = 0;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(mutex_get_thread(m) == NULL))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release ownership of a mutex object - only the owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled, otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}

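/* Illustrative sketch (not part of the original file): protecting shared
 * state with a mutex. The mutex is recursive, so a thread that already owns
 * it may lock it again. example_mtx, example_shared_counter and the
 * example_* functions are hypothetical. */
#if 0
static struct mutex example_mtx;
static int example_shared_counter;

static void example_mutex_setup(void)
{
    mutex_init(&example_mtx); /* once, before other threads can see it */
}

static void example_mutex_user(void)
{
    mutex_lock(&example_mtx);   /* blocks until ownership is gained */
    example_shared_counter++;   /* critical section                 */
    mutex_unlock(&example_mtx); /* only the owner may unlock        */
}
#endif
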
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */

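/* Illustrative sketch (not part of the original file): a counting semaphore
 * used as a completion signal, e.g. a driver releasing it when a transfer
 * finishes while a thread waits for it. example_done_sem and the example_*
 * functions are hypothetical. */
#if 0
static struct semaphore example_done_sem;

static void example_sem_setup(void)
{
    /* at most 1 outstanding signal, initially not signalled */
    semaphore_init(&example_done_sem, 1, 0);
}

static void example_transfer_complete(void)
{
    semaphore_release(&example_done_sem); /* wake the waiter, if any */
}

static void example_wait_for_transfer(void)
{
    semaphore_wait(&example_done_sem); /* blocks until released */
}
#endif
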
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = false;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal, blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(!w->signalled))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = false; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the waiting thread, or leave the signal set if no thread has
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = true;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
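
/* Illustrative sketch (not part of the original file): a wakeup object used
 * by a thread to wait for an interrupt with a timeout. Unlike a semaphore it
 * does not count - several signals delivered before the wait collapse into
 * one. example_isr_wakeup and the example_* functions are hypothetical. */
#if 0
static struct wakeup example_isr_wakeup;

static void example_wakeup_setup(void)
{
    wakeup_init(&example_isr_wakeup);
}

/* Interrupt handler side - wakeup_signal is safe to call from an ISR */
static void example_irq_handler(void)
{
    wakeup_signal(&example_isr_wakeup);
}

/* Thread side - give up after two seconds */
static bool example_wait_for_irq(void)
{
    return wakeup_wait(&example_isr_wakeup, 2*HZ) == OBJ_WAIT_SUCCEEDED;
}
#endif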