/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#ifdef SIMULATOR
#include "system-sdl.h"
#include "debug.h"
#endif
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}
/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;     /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23;   /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TSR0 &= ~0x01;
    TIER0 = 0xf9;  /* Enable GRA match interrupt */

    TSTR |= 0x01;  /* Start timer 0 */
}

void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TSR0 &= ~0x01;
}
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0; /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
          /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff; /* Clear all events */

    ICR1 = 0x8c; /* Interrupt on level 3.0 */
    IMR &= ~0x200;
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TER0 = 0xff; /* Clear all events */
}
#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using the main CPU core -
       wake up the COP through its control interface to provide the pulse */
    for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if (tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

#if NUM_CORES > 1
    /* Pulse the COP */
    core_wake(COP);
#endif /* NUM_CORES */

    current_tick++;
}
#endif

/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    TIMER1_CFG = 0x0;
    TIMER1_VAL;
    /* enable timer */
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}
#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TIMER0.clr = 0;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}
#endif
int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    return -1;
}
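
/* A minimal usage sketch for the tick task API (illustrative only - the
 * callback name below is hypothetical, not something defined in this file):
 *
 *   static void my_tick_task(void)
 *   {
 *       // runs in interrupt context once per kernel tick; keep it short
 *   }
 *
 *   tick_add_task(my_tick_task);
 *   ...
 *   tick_remove_task(my_tick_task);
 */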
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
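
/* A minimal usage sketch for the timeout API, assuming INCLUDE_TIMEOUT_API
 * is defined (the callback and object names below are hypothetical):
 *
 *   static struct timeout backlight_tmo;
 *
 *   static int backlight_off_cb(struct timeout *tmo)
 *   {
 *       (void)tmo;
 *       // do the one-shot work here, in tick (interrupt) context
 *       return 0;  // 0 cancels the timeout; nonzero reloads ->ticks
 *   }
 *
 *   // run the callback roughly two seconds from now
 *   timeout_register(&backlight_tmo, backlight_off_cb, 2*HZ, 0);
 */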
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21;  // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20);  // start timer 4
    do {
       counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    /* hacky.. */
    long sleep_ticks = current_tick + ticks + 1;
    while (sleep_ticks > current_tick)
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
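
/* Usage note: sleep() and yield() may only be called from thread context,
 * never from an ISR or a tick task. A sketch of a typical polling loop
 * (the condition is hypothetical):
 *
 *   while (!data_ready())
 *       sleep(HZ/10);   // give up the CPU for ~100 ms per iteration
 */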
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                                 rd                          wr
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                                 \/     \/            \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
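
/* A sketch of how the synchronous send path is used in practice (queue,
 * message and helper names are hypothetical, not defined here). The receiving
 * thread owns the queue; queue_send() blocks the caller until queue_reply()
 * runs or the next queue_wait(_w_tmo) auto-replies with 0:
 *
 *   // receiver, inside its event loop:
 *   queue_wait(&my_queue, &ev);
 *   if (ev.id == MY_MSG)
 *       queue_reply(&my_queue, process(ev.data));
 *
 *   // sender, in another thread:
 *   intptr_t result = queue_send(&my_queue, MY_MSG, (intptr_t)arg);
 */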
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
    thread->wakeup_ext_cb = NULL;
    thread->retval = 0;
}

/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this. An official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             struct thread_entry *owner)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        send->blocker.thread = owner;
        if(owner != NULL)
            q->blocker_p = &send->blocker;
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner;
}

/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}

/* Moves the waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow, which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
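
/* A minimal sketch of the basic asynchronous queue pattern (the queue, event
 * id and handler names below are hypothetical, not defined in this file):
 *
 *   static struct event_queue my_queue;
 *   struct queue_event ev;
 *
 *   queue_init(&my_queue, true);   // register so it receives broadcasts
 *
 *   // consumer thread:
 *   for (;;)
 *   {
 *       queue_wait_w_tmo(&my_queue, &ev, HZ);
 *       if (ev.id == SYS_TIMEOUT)
 *           continue;              // periodic housekeeping could go here
 *       handle(ev.id, ev.data);
 *   }
 *
 *   // any other thread:
 *   queue_post(&my_queue, MY_EVENT_ID, 0);
 */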
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = cores[CURRENT_CORE].running;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(q->send && q->send->curr_sender) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - be careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to count
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return i;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = 0;
    MUTEX_SET_THREAD(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(current == MUTEX_GET_THREAD(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(m->locked == 0)
    {
        /* lock is open */
        MUTEX_SET_THREAD(m, current);
        m->locked = 1;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only the owning thread may call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  thread_get_current()->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(m->queue == NULL)
    {
        /* no threads waiting - open the lock */
        MUTEX_SET_THREAD(m, NULL);
        m->locked = 0;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of the owning thread is handled in the wakeup protocol
         * if priorities are enabled, otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
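
/* A minimal usage sketch for the mutex API (the mutex object and the code
 * between lock and unlock are hypothetical):
 *
 *   static struct mutex my_mtx;
 *
 *   mutex_init(&my_mtx);     // once, before other threads can see the object
 *
 *   mutex_lock(&my_mtx);     // blocks; nests if the same thread locks again
 *   // ... touch the shared state ...
 *   mutex_unlock(&my_mtx);   // owning thread only
 */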
/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(l->thread == current)
    {
        /* current core already owns it */
        l->count++;
        return;
    }

    /* lock against other processor cores */
    corelock_lock(&l->cl);

    /* take ownership */
    l->thread = current;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == thread_get_current(),
                  "spinlock_unlock->wrong thread\n");

    if(l->count > 0)
    {
        /* this core still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    /* release lock */
    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(--s->count >= 0)
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
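
/* A minimal usage sketch, assuming HAVE_SEMAPHORE_OBJECTS is defined (the
 * object name is hypothetical). Initialized with 0 of 1 slots, the semaphore
 * behaves as a simple signal from a producer to one waiting consumer:
 *
 *   static struct semaphore data_sem;
 *
 *   semaphore_init(&data_sem, 1, 0);
 *
 *   // consumer thread:
 *   semaphore_wait(&data_sem);      // blocks until released
 *
 *   // producer (thread context):
 *   semaphore_release(&data_sem);
 */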
/****************************************************************************
 * Simple event functions ;)
 ****************************************************************************/
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
    corelock_init(&e->cl);
}

void event_wait(struct event *e, unsigned int for_state)
{
    struct thread_entry *current;

    corelock_lock(&e->cl);

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(e->state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            corelock_unlock(&e->cl);
            return;
        }
        /* block until state matches */
    }
    else if(for_state == e->state)
    {
        /* the state being waited for is the current state */
        corelock_unlock(&e->cl);
        return;
    }

    /* block until state matches what the caller requests */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &e->cl; )
    current->bqp = &e->queues[for_state];

    disable_irq();
    block_thread(current);

    corelock_unlock(&e->cl);

    /* turn control over to next thread */
    switch_thread();
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int result;
    int oldlevel;

    corelock_lock(&e->cl);

    if(e->state == state)
    {
        /* no change */
        corelock_unlock(&e->cl);
        return;
    }

    IF_PRIO( result = THREAD_OK; )

    oldlevel = disable_irq_save();

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            /* no thread should have ever blocked for nonsignaled */
            KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
                          "set_event_state->queue[NS]:S\n");
            /* pass to next thread and keep unsignaled - "pulse" */
            result = wakeup_thread(&e->queues[STATE_SIGNALED]);
            e->state = (result & THREAD_OK) ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            e->state = STATE_SIGNALED;
            IF_PRIO( result = )
                thread_queue_wake(&e->queues[STATE_SIGNALED]);
        }
    }
    else
    {
        /* release all threads waiting for nonsignaled */

        /* no thread should have ever blocked if automatic */
        KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
                      e->automatic == 0, "set_event_state->queue[NS]:NS\n");

        e->state = STATE_NONSIGNALED;
        IF_PRIO( result = )
            thread_queue_wake(&e->queues[STATE_NONSIGNALED]);
    }

    restore_irq(oldlevel);

    corelock_unlock(&e->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_EVENT_OBJECTS */
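
/* A minimal usage sketch, assuming HAVE_EVENT_OBJECTS is defined (the object
 * name is hypothetical). An automatic event acts as a "pulse": it releases
 * one waiter and reverts to nonsignaled. It starts nonsignaled unless
 * STATE_SIGNALED is also passed in the init flags:
 *
 *   static struct event my_event;
 *
 *   event_init(&my_event, EVENT_AUTOMATIC);
 *
 *   // waiter:
 *   event_wait(&my_event, STATE_SIGNALED);
 *
 *   // signaler:
 *   event_set_state(&my_event, STATE_SIGNALED);
 */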
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 */

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = 0;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(w->signalled == 0 && timeout != TIMEOUT_NOBLOCK)
    {
        struct thread_entry * current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(w->signalled == 0)
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = 0; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = 1;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
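
/* A minimal usage sketch for the wakeup object, assuming HAVE_WAKEUP_OBJECTS
 * is defined (object and helper names are hypothetical). Typically an ISR
 * signals and a thread waits:
 *
 *   static struct wakeup transfer_wakeup;
 *
 *   wakeup_init(&transfer_wakeup);
 *
 *   // ISR, when the hardware finishes:
 *   wakeup_signal(&transfer_wakeup);
 *
 *   // thread, waiting up to one second:
 *   if (wakeup_wait(&transfer_wakeup, HZ) != OBJ_WAIT_SUCCEEDED)
 *       handle_timeout();
 */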