/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#include "debug.h"
#include "general.h"

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
    defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock cl;
#endif
} all_queues SHAREDBSS_ATTR;

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}

/****************************************************************************
 * Timer tick - Timer initialization and the interrupt handler are defined
 * at the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}

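/* Illustrative sketch of the tick-task API above; the handler and counter
 * names are hypothetical and not part of this file. A registered task is
 * called from the timer tick (HZ times per second), so it must be short
 * and must not block:
 *
 *     static volatile long my_counter = 0;
 *
 *     static void my_tick_task(void)
 *     {
 *         my_counter++;              // runs in tick context
 *     }
 *
 *     tick_add_task(my_tick_task);
 *     ...
 *     tick_remove_task(my_tick_task);
 */
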
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*tmo_list == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */

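/* Illustrative usage of the timeout API above; the callback, object and
 * helper names are hypothetical. The callback runs in tick context;
 * returning 0 makes the timeout a one-shot, returning a tick count re-arms
 * it for that many ticks:
 *
 *     static struct timeout my_tmo;
 *
 *     static int my_timeout_cb(struct timeout *tmo)
 *     {
 *         do_something((int)tmo->data);
 *         return 0;                    // done - do not reschedule
 *     }
 *
 *     timeout_register(&my_tmo, my_timeout_cb, HZ/2, 0); // fire in half a second
 *     ...
 *     timeout_cancel(&my_tmo);         // optional - cancel before it fires
 */
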
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
unsigned sleep(unsigned ticks)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (SLEEP_KERNEL_HOOK(ticks))
        return 0; /* Handled */

    disable_irq();
    sleep_thread(ticks);
    switch_thread();
    return 0;
}

void yield(void)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (YIELD_KERNEL_HOOK())
        return; /* handled */

    switch_thread();
}

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                                 rd                          wr
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                                  \/     \/            \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:   /\
 *
 * Thread T0 has E0 in its own struct queue_event.
 *
 ****************************************************************************/

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * call.
 */
static void queue_release_sender(struct thread_entry * volatile * sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}

/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}

/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(UNLIKELY(*spp))
        {
            queue_release_sender(spp, 0);
        }
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}

/* Moves the waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow, which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    /* Whatever garbage is in q->write is irrelevant because of the masking
     * design - any other functions that empty the queue do this as well, so
     * that queue_count and queue_empty return sane values in the case of a
     * concurrent change without locking inside them. */
    q->read = q->write;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        if(*p == NULL)
        {
            /* Add it to the all_queues array */
            *p = q;
            corelock_unlock(&all_queues.cl);
        }
    }

    restore_irq(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_self_entry(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
    queue_do_auto_reply(q->send);
#endif

    while(1)
    {
        struct thread_entry *current;

        rd = q->read;
        if (rd != q->write) /* A waking message could disappear */
            break;

        current = thread_self_entry();

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        disable_irq();
        corelock_lock(&q->cl);
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(ev)
#endif
    {
        q->read = rd + 1;
        rd &= QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    /* else just waiting on non-empty */

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;
    unsigned int rd, wr;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_self_entry(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
    queue_do_auto_reply(q->send);
#endif

    rd = q->read;
    wr = q->write;
    if (rd == wr && ticks > 0)
    {
        struct thread_entry *current = thread_self_entry();

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        disable_irq();
        corelock_lock(&q->cl);

        rd = q->read;
        wr = q->write;
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(ev)
#endif
    {
        /* no worry about a removed message here - status is checked inside
           locks - perhaps verify if timeout or false alarm */
        if (rd != wr)
        {
            q->read = rd + 1;
            rd &= QUEUE_LENGTH_MASK;
            *ev = q->events[rd];
            /* Get data for a waiting thread if one */
            queue_do_fetch_sender(q->send, rd);
        }
        else
        {
            ev->id = SYS_TIMEOUT;
        }
    }
    /* else just waiting on non-empty */

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_post ovf q=%08lX", (long)q);

    q->events[wr].id = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

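/* Illustrative pattern for the queue API above; the queue object, event ID
 * and handler are hypothetical, not defined in this file. One thread owns
 * the queue and services it in a loop; other threads (or ISRs) post to it:
 *
 *     static struct event_queue my_queue;
 *     queue_init(&my_queue, true);            // register for broadcasts
 *
 *     // owning thread:
 *     while(1)
 *     {
 *         struct queue_event ev;
 *         queue_wait_w_tmo(&my_queue, &ev, HZ);
 *         switch(ev.id)
 *         {
 *         case SYS_TIMEOUT:                   // nothing arrived within HZ ticks
 *             break;
 *         case MY_EVENT:
 *             handle(ev.data);
 *             break;
 *         }
 *     }
 *
 *     // any other thread (or an ISR, via queue_post only):
 *     queue_post(&my_queue, MY_EVENT, 42);
 */
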
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_send ovf q=%08lX", (long)q);

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_self_entry();

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        struct queue_sender_list *sender;

        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        sender = q->send;

        /* Double-check locking */
        if(LIKELY(sender && sender->curr_sender))
            queue_release_sender(&sender->curr_sender, retval);

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

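/* Illustrative synchronous-messaging sketch; the queue, sender list, owner
 * id and event ID are hypothetical. The owning thread enables sending once,
 * then replies to each dequeued synchronous message; the sending thread
 * blocks in queue_send until that reply arrives:
 *
 *     static struct event_queue q;
 *     static struct queue_sender_list q_send;
 *
 *     // owning thread, during setup:
 *     queue_init(&q, true);
 *     queue_enable_queue_send(&q, &q_send, owner_thread_id);
 *
 *     // owning thread, in its event loop:
 *     queue_wait(&q, &ev);
 *     if(ev.id == MY_SYNC_EVENT)
 *         queue_reply(&q, handle(ev.data));  // wakes the sender with this value
 *
 *     // client thread:
 *     intptr_t result = queue_send(&q, MY_SYNC_EVENT, 42);
 */
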
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Scan the event queue from head to tail, returning any event from the
   filter list that was found, optionally removing the event. If an
   event is returned, synchronous events are handled in the same manner as
   with queue_wait(_w_tmo); if discarded, then as queue_clear.
   If filters are NULL, any event matches. If filters exist, the default
   is to search the full queue depth.
   Earlier filters take precedence.

   Return true if an event was found, false otherwise. */
bool queue_peek_ex(struct event_queue *q, struct queue_event *ev,
                   unsigned int flags, const long (*filters)[2])
{
    bool have_msg;
    unsigned int rd, wr;
    int oldlevel;

    if(LIKELY(q->read == q->write))
        return false; /* Empty: do nothing further */

    have_msg = false;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Starting at the head, find first match */
    for(rd = q->read, wr = q->write; rd != wr; rd++)
    {
        struct queue_event *e = &q->events[rd & QUEUE_LENGTH_MASK];

        if(filters)
        {
            /* Have filters - find the first thing that passes */
            const long (* f)[2] = filters;
            const long (* const f_last)[2] =
                &filters[flags & QPEEK_FILTER_COUNT_MASK];
            long id = e->id;

            do
            {
                if(UNLIKELY(id >= (*f)[0] && id <= (*f)[1]))
                    goto passed_filter;
            }
            while(++f <= f_last);

            if(LIKELY(!(flags & QPEEK_FILTER_HEAD_ONLY)))
                continue;   /* No match; test next event */
            else
                break;      /* Only check the head */
        }
        /* else - anything passes */

    passed_filter:

        /* Found a matching event */
        have_msg = true;

        if(ev)
            *ev = *e;       /* Caller wants the event */

        if(flags & QPEEK_REMOVE_EVENTS)
        {
            /* Do event removal */
            unsigned int r = q->read;
            q->read = r + 1; /* Advance head */

            if(ev)
            {
                /* Auto-reply */
                queue_do_auto_reply(q->send);
                /* Get the thread waiting for reply, if any */
                queue_do_fetch_sender(q->send, rd & QUEUE_LENGTH_MASK);
            }
            else
            {
                /* Release any thread waiting on this message */
                queue_do_unblock_sender(q->send, rd & QUEUE_LENGTH_MASK);
            }

            /* Slide messages forward into the gap if not at the head */
            while(rd != r)
            {
                unsigned int dst = rd & QUEUE_LENGTH_MASK;
                unsigned int src = --rd & QUEUE_LENGTH_MASK;

                q->events[dst] = q->events[src];
                /* Keep sender wait list in sync */
                if(q->send)
                    q->send->senders[dst] = q->send->senders[src];
            }
        }

        break;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    return queue_peek_ex(q, ev, 0, NULL);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    const long f[2] = { id, id };
    while (queue_peek_ex(q, NULL,
            QPEEK_FILTER_HEAD_ONLY | QPEEK_REMOVE_EVENTS, &f));
}
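
/* Illustrative filter use of queue_peek_ex above; the event IDs are
 * hypothetical. Each filter entry is an inclusive { low, high } id range;
 * without QPEEK_FILTER_HEAD_ONLY the whole queue depth is scanned, so this
 * drops every queued event whose id falls in the range:
 *
 *     static const long f[2] = { MY_FIRST_EVENT, MY_LAST_EVENT };
 *     while (queue_peek_ex(q, NULL, QPEEK_REMOVE_EVENTS, &f));
 */
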
#else /* !HAVE_EXTENDED_MESSAGING_AND_NAME */
/* The more powerful routines aren't required */
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;

    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    rd = q->read;
    if(rd != q->write)
    {
        *ev = q->events[rd & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to inspect
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->recursion = 0;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_self_entry();

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(mutex_get_thread(m) == NULL))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release ownership of a mutex object - only the owning thread may call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_self_entry()->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled, otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}

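/* Illustrative usage; the mutex object and the critical section are
 * hypothetical. Initialize once, then bracket shared-state access. The lock
 * is recursive, so a thread may re-lock a mutex it already owns as long as
 * each mutex_lock is matched by a mutex_unlock:
 *
 *     static struct mutex my_mutex;
 *     mutex_init(&my_mutex);
 *
 *     mutex_lock(&my_mutex);
 *     // ... touch shared state ...
 *     mutex_unlock(&my_mutex);
 */
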
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
/* Initialize the semaphore object.
 * max = maximum up count the semaphore may assume (max >= 1)
 * start = initial count of semaphore (0 <= count <= max) */
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret;
    int oldlevel;
    int count;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    count = s->count;

    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout == 0)
    {
        /* just polling it */
        ret = OBJ_WAIT_TIMEDOUT;
    }
    else
    {
        /* too many waits - block until count is upped... */
        struct thread_entry * current = thread_self_entry();
        IF_COP( current->obj_cl = &s->cl; )
        current->bqp = &s->queue;
        /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
         * explicit in semaphore_release */
        current->retval = OBJ_WAIT_TIMEDOUT;

        if(timeout > 0)
            block_thread_w_tmo(current, timeout); /* ...or timed out... */
        else
            block_thread(current);                /* -timeout = infinite */

        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        return current->retval;
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Up the semaphore's count and release any thread waiting at the head of the
 * queue. The count is saturated to the value of the 'max' parameter specified
 * in 'semaphore_init'. */
void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    if(LIKELY(s->queue != NULL))
    {
        /* a thread was queued - wake it up and keep count at 0 */
        KERNEL_ASSERT(s->count == 0,
            "semaphore_release->threads queued but count=%d!\n", s->count);
        s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
        IF_PRIO( result = ) wakeup_thread(&s->queue);
    }
    else
    {
        int count = s->count;
        if(count < s->max)
        {
            /* nothing waiting - up it */
            s->count = count + 1;
        }
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

#if defined(HAVE_PRIORITY_SCHEDULING) && defined(irq_enabled_checkval)
    /* No thread switch if IRQ disabled - it's probably called via ISR.
     * switch_thread would as well enable them anyway. */
    if((result & THREAD_SWITCH) && irq_enabled_checkval(oldlevel))
        switch_thread();
#endif
}
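
/* Illustrative usage; the semaphore object and thread roles are
 * hypothetical. A binary semaphore used to signal a worker thread, e.g.
 * from an ISR (semaphore_release is ISR-safe):
 *
 *     static struct semaphore data_ready;
 *     semaphore_init(&data_ready, 1, 0);       // max 1, initially 0
 *
 *     // ISR / producer:
 *     semaphore_release(&data_ready);          // wake the worker
 *
 *     // worker thread:
 *     if(semaphore_wait(&data_ready, HZ) == OBJ_WAIT_SUCCEEDED)
 *         process_data();                      // woke within one second
 */
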
#endif /* HAVE_SEMAPHORE_OBJECTS */