/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#include "debug.h"
#include "general.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
    defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock cl;
#endif
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}
int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}
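
/* Illustrative sketch (not part of this file): a driver could hook the timer
 * tick to run a small piece of work once per tick. The handler runs in
 * interrupt context, so it must be short and must never block. The name
 * poll_buttons is hypothetical.
 *
 *     static void poll_buttons(void)
 *     {
 *         // a few instructions at most, no blocking calls
 *     }
 *
 *     tick_add_task(poll_buttons);     // called every tick (HZ times/second)
 *     ...
 *     tick_remove_task(poll_buttons);  // stop receiving ticks
 */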
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*tmo_list == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
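
/* Illustrative sketch (not part of this file): arming a one-shot that rearms
 * itself while work remains. The callback runs from the tick task, i.e. in
 * interrupt context; returning 0 cancels the timeout, returning a positive
 * tick count reschedules it. The names drain_tmo, drain_cb and
 * more_work_pending are hypothetical.
 *
 *     static struct timeout drain_tmo;
 *
 *     static int drain_cb(struct timeout *tmo)
 *     {
 *         return more_work_pending() ? HZ/10 : 0;  // rearm or stop
 *     }
 *
 *     timeout_register(&drain_tmo, drain_cb, HZ/10, 0);  // fire in ~1/10 s
 */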
/****************************************************************************
 * Thread stuff
 ****************************************************************************/

/* Suspends a thread's execution for at least the specified number of ticks.
 * May result in CPU core entering wait-for-interrupt mode if no other thread
 * may be scheduled.
 *
 * NOTE: sleep(0) sleeps until the end of the current tick
 *       sleep(n) that doesn't result in rescheduling:
 *                      n <= ticks suspended < n + 1
 *       n to n+1 is a lower bound. Other factors may affect the actual time
 *       a thread is suspended before it runs again.
 */
unsigned sleep(unsigned ticks)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (SLEEP_KERNEL_HOOK(ticks))
        return 0; /* Handled */

    disable_irq();
    sleep_thread(ticks);
    switch_thread();
    return 0;
}
/* Elects another thread to run or, if no other thread may be made ready to
 * run, immediately returns control back to the calling thread.
 */
void yield(void)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (YIELD_KERNEL_HOOK())
        return; /* handled */

    switch_thread();
}
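
/* Illustrative sketch (not part of this file): with HZ = 100, sleep(HZ/10)
 * suspends the caller for at least 10 ticks (roughly 100 ms), while yield()
 * only offers the CPU and returns immediately if nothing else is ready.
 * data_ready is hypothetical.
 *
 *     while(!data_ready())
 *         sleep(HZ/10);   // coarse poll that still lets other threads run
 */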
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                               rd                       wr
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                                 \/     \/            \/
 * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry * volatile * sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority disinherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this. An official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}
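
/* Illustrative sketch (not part of this file): synchronous messaging between
 * a client and the thread that owns the queue. The owner dequeues with
 * queue_wait and answers with queue_reply; queue_send blocks the client until
 * that reply (or until the implicit reply issued on the owner's next
 * queue_wait). The names svc_queue, svc_send_list, svc_thread_id, SVC_DO_WORK
 * and do_work are hypothetical.
 *
 *     // owner thread, once at startup:
 *     queue_init(&svc_queue, true);
 *     queue_enable_queue_send(&svc_queue, &svc_send_list, svc_thread_id);
 *
 *     // owner thread, main loop:
 *     struct queue_event ev;
 *     queue_wait(&svc_queue, &ev);
 *     if(ev.id == SVC_DO_WORK)
 *         queue_reply(&svc_queue, do_work(ev.data));
 *
 *     // client thread:
 *     intptr_t result = queue_send(&svc_queue, SVC_DO_WORK, 0);
 */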
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(UNLIKELY(*spp))
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves a waiting thread's reference from the senders array to curr_sender,
 * which represents the thread waiting for a response to the last message
 * removed from the queue. This also protects the thread from being bumped
 * due to overflow, which would not be a valid action since its message
 * _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    /* What garbage is in write is irrelevant because of the masking design -
     * any other functions that empty the queue do this as well so that
     * queue_count and queue_empty return sane values in the case of a
     * concurrent change without locking inside them. */
    q->read = q->write;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        if(*p == NULL)
        {
            /* Add it to the all_queues array */
            *p = q;
            corelock_unlock(&all_queues.cl);
        }
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_self_entry(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
    queue_do_auto_reply(q->send);
#endif

    while(1)
    {
        struct thread_entry *current;

        rd = q->read;
        if (rd != q->write) /* A waking message could disappear */
            break;

        current = thread_self_entry();

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        disable_irq();
        corelock_lock(&q->cl);
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(ev)
#endif
    {
        q->read = rd + 1;
        rd &= QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    /* else just waiting on non-empty */

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;
    unsigned int rd, wr;

    /* this function works only with a positive number (or zero) of ticks */
    if (ticks == TIMEOUT_BLOCK)
    {
        queue_wait(q, ev);
        return;
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_self_entry(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
    queue_do_auto_reply(q->send);
#endif

    rd = q->read;
    wr = q->write;
    if (rd == wr && ticks > 0)
    {
        struct thread_entry *current = thread_self_entry();

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        disable_irq();
        corelock_lock(&q->cl);

        rd = q->read;
        wr = q->write;
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(ev)
#endif
    {
        /* no worry about a removed message here - status is checked inside
           locks - perhaps verify if timeout or false alarm */
        if (rd != wr)
        {
            q->read = rd + 1;
            rd &= QUEUE_LENGTH_MASK;
            *ev = q->events[rd];
            /* Get data for a waiting thread if one */
            queue_do_fetch_sender(q->send, rd);
        }
        else
        {
            ev->id = SYS_TIMEOUT;
        }
    }
    /* else just waiting on non-empty */

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_post ovf q=%08lX", (long)q);

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
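
/* Illustrative sketch (not part of this file): the usual asynchronous pattern
 * is one consumer thread blocking in queue_wait(_w_tmo) while other threads
 * or ISRs post events with queue_post. The names ui_queue, EVENT_REDRAW and
 * handle_event are hypothetical.
 *
 *     // consumer:
 *     struct queue_event ev;
 *     while(1)
 *     {
 *         queue_wait_w_tmo(&ui_queue, &ev, HZ);
 *         if(ev.id == SYS_TIMEOUT)
 *             continue;            // nothing arrived within one second
 *         handle_event(&ev);
 *     }
 *
 *     // producer (thread or ISR):
 *     queue_post(&ui_queue, EVENT_REDRAW, 0);
 */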
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_send ovf q=%08lX", (long)q);

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_self_entry();

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        struct queue_sender_list *sender;

        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        sender = q->send;

        /* Double-check locking */
        if(LIKELY(sender && sender->curr_sender))
            queue_release_sender(&sender->curr_sender, retval);

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Scan the event queue from head to tail, returning any event from the
   filter list that was found, optionally removing the event. If an
   event is returned, synchronous events are handled in the same manner as
   with queue_wait(_w_tmo); if discarded, then as queue_clear.
   If filters are NULL, any event matches. If filters exist, the default
   is to search the full queue depth.
   Earlier filters take precedence.

   Return true if an event was found, false otherwise. */
bool queue_peek_ex(struct event_queue *q, struct queue_event *ev,
                   unsigned int flags, const long (*filters)[2])
{
    bool have_msg;
    unsigned int rd, wr;
    int oldlevel;

    if(LIKELY(q->read == q->write))
        return false; /* Empty: do nothing further */

    have_msg = false;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Starting at the head, find first match */
    for(rd = q->read, wr = q->write; rd != wr; rd++)
    {
        struct queue_event *e = &q->events[rd & QUEUE_LENGTH_MASK];

        if(filters)
        {
            /* Have filters - find the first thing that passes */
            const long (* f)[2] = filters;
            const long (* const f_last)[2] =
                &filters[flags & QPEEK_FILTER_COUNT_MASK];
            long id = e->id;

            do
            {
                if(UNLIKELY(id >= (*f)[0] && id <= (*f)[1]))
                    goto passed_filter;
            }
            while(++f <= f_last);

            if(LIKELY(!(flags & QPEEK_FILTER_HEAD_ONLY)))
                continue; /* No match; test next event */
            else
                break; /* Only check the head */
        }
        /* else - anything passes */

    passed_filter:

        /* Found a matching event */
        have_msg = true;

        if(ev)
            *ev = *e; /* Caller wants the event */

        if(flags & QPEEK_REMOVE_EVENTS)
        {
            /* Do event removal */
            unsigned int r = q->read;
            q->read = r + 1; /* Advance head */

            if(ev)
            {
                /* Auto-reply */
                queue_do_auto_reply(q->send);
                /* Get the thread waiting for reply, if any */
                queue_do_fetch_sender(q->send, rd & QUEUE_LENGTH_MASK);
            }
            else
            {
                /* Release any thread waiting on this message */
                queue_do_unblock_sender(q->send, rd & QUEUE_LENGTH_MASK);
            }

            /* Slide messages forward into the gap if not at the head */
            while(rd != r)
            {
                unsigned int dst = rd & QUEUE_LENGTH_MASK;
                unsigned int src = --rd & QUEUE_LENGTH_MASK;

                q->events[dst] = q->events[src];
                /* Keep sender wait list in sync */
                if(q->send)
                    q->send->senders[dst] = q->send->senders[src];
            }
        }

        break;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    return queue_peek_ex(q, ev, 0, NULL);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    const long f[2] = { id, id };
    while (queue_peek_ex(q, NULL,
            QPEEK_FILTER_HEAD_ONLY | QPEEK_REMOVE_EVENTS, &f));
}
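
/* Illustrative sketch (not part of this file): queue_remove_from_head above
 * is itself a minimal filter user. A caller could likewise peek for any id
 * in an inclusive range without disturbing other events; each filter entry
 * is a { low, high } pair, and the QPEEK_FILTER_COUNT_MASK bits of the flags
 * encode how many extra entries follow the first. MY_MSG_FIRST and
 * MY_MSG_LAST are hypothetical range bounds.
 *
 *     const long f[1][2] = { { MY_MSG_FIRST, MY_MSG_LAST } };
 *     struct queue_event ev;
 *     if(queue_peek_ex(&q, &ev, 0, f))
 *     {
 *         // a matching event is still left in the queue
 *     }
 */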
#else /* !HAVE_EXTENDED_MESSAGING_AND_NAME */

/* The more powerful routines aren't required */
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;

    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    rd = q->read;
    if(rd != q->write)
    {
        *ev = q->events[rd & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
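
/* Illustrative sketch (not part of this file): the race-free alternative
 * suggested above is a zero-tick wait, which re-checks emptiness under the
 * queue lock and hands back SYS_TIMEOUT when nothing is pending.
 *
 *     struct queue_event ev;
 *     queue_wait_w_tmo(&q, &ev, 0);
 *     if(ev.id != SYS_TIMEOUT)
 *     {
 *         // got an event without risking an unwanted block
 *     }
 */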
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->recursion = 0;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_self_entry();

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(mutex_get_thread(m) == NULL))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only the owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_self_entry()->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled, otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
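
/* Illustrative sketch (not part of this file): typical mutex use around a
 * shared structure. The lock is recursive, so a thread that already owns it
 * may take it again; each mutex_lock must be balanced by a mutex_unlock from
 * the same thread. The name cache_mutex is hypothetical.
 *
 *     static struct mutex cache_mutex;
 *
 *     mutex_init(&cache_mutex);   // once, before other threads can see it
 *
 *     mutex_lock(&cache_mutex);
 *     // touch the shared state
 *     mutex_unlock(&cache_mutex);
 */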
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
/* Initialize the semaphore object.
 * max = maximum up count the semaphore may assume (max >= 1)
 * start = initial count of semaphore (0 <= count <= max) */
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret;
    int oldlevel;
    int count;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    count = s->count;

    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout == 0)
    {
        /* just polling it */
        ret = OBJ_WAIT_TIMEDOUT;
    }
    else
    {
        /* too many waits - block until count is upped... */
        struct thread_entry * current = thread_self_entry();
        IF_COP( current->obj_cl = &s->cl; )
        current->bqp = &s->queue;
        /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
         * explicit in semaphore_release */
        current->retval = OBJ_WAIT_TIMEDOUT;

        if(timeout > 0)
            block_thread_w_tmo(current, timeout); /* ...or timed out... */
        else
            block_thread(current);                /* -timeout = infinite */

        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        return current->retval;
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Up the semaphore's count and release any thread waiting at the head of the
 * queue. The count is saturated to the value of the 'max' parameter specified
 * in 'semaphore_init'. */
void semaphore_release(struct semaphore *s)
{
    unsigned int result = THREAD_NONE;
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    if(LIKELY(s->queue != NULL))
    {
        /* a thread was queued - wake it up and keep count at 0 */
        KERNEL_ASSERT(s->count == 0,
            "semaphore_release->threads queued but count=%d!\n", s->count);
        s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
        result = wakeup_thread(&s->queue);
    }
    else
    {
        int count = s->count;
        if(count < s->max)
        {
            /* nothing waiting - up it */
            s->count = count + 1;
        }
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
    /* No thread switch if not thread context */
    if((result & THREAD_SWITCH) && is_thread_context())
        switch_thread();
#endif
    (void)result;
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
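
/* Illustrative sketch (not part of this file): a semaphore as a wakeup flag
 * between an ISR and a worker thread. semaphore_wait with a zero timeout only
 * polls (and is therefore ISR-safe), semaphore_release defers the thread
 * switch when not in thread context, and max = 1 keeps repeated releases
 * from accumulating. The name dma_done is hypothetical.
 *
 *     static struct semaphore dma_done;
 *
 *     semaphore_init(&dma_done, 1, 0);
 *
 *     // ISR:
 *     semaphore_release(&dma_done);
 *
 *     // worker thread:
 *     if(semaphore_wait(&dma_done, HZ) == OBJ_WAIT_SUCCEEDED)
 *     {
 *         // transfer finished within one second
 *     }
 */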