firmware/kernel.c
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#ifdef SIMULATOR
#include "system-sdl.h"
#include "debug.h"
#endif
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
static int num_tick_funcs = 0;

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();

    /* Add a task if there is room */
    if(num_tick_funcs < MAX_NUM_TICK_TASKS)
    {
        tick_funcs[num_tick_funcs++] = f;
        restore_irq(oldlevel);
        return 0;
    }

    restore_irq(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}
int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Remove a task if it is there */
    for(i = 0;i < num_tick_funcs;i++)
    {
        if(tick_funcs[i] == f)
        {
            /* Compact function list - propagates NULL-terminator as well */
            for(; i < num_tick_funcs; i++)
                tick_funcs[i] = tick_funcs[i+1];

            num_tick_funcs--;

            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    return -1;
}
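
/* Illustrative only (not part of the original source): a minimal sketch of
 * how a driver might register a periodic tick task. The callback runs in
 * interrupt context on every kernel tick, so it must be short and must not
 * block. All names below are hypothetical. */
#if 0
static volatile long example_tick_count = 0;

static void example_tick_task(void)
{
    example_tick_count++;   /* called once per kernel tick (HZ times/second) */
}

static void example_driver_init(void)
{
    tick_add_task(example_tick_task);     /* start receiving tick callbacks */
}

static void example_driver_shutdown(void)
{
    tick_remove_task(example_tick_task);  /* stop before the code goes away */
}
#endif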
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
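
/* Illustrative only (not part of the original source): a rough sketch of
 * arming a one-shot timeout with the API above. The struct must stay valid
 * while registered; returning 0 from the callback lets it expire, a nonzero
 * value reloads it. Names below are hypothetical. */
#if 0
static struct timeout example_tmo;

static int example_timeout_cb(struct timeout *tmo)
{
    /* runs from the tick ISR roughly HZ/2 ticks after registration */
    (void)tmo;
    return 0;   /* do not reload - one-shot */
}

static void example_arm(void)
{
    timeout_register(&example_tmo, example_timeout_cb, HZ/2, 0);
}
#endif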
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21; // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20); // start timer 4
    do {
       counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    /* hacky.. */
    long sleep_ticks = current_tick + ticks + 1;
    while (sleep_ticks > current_tick)
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}
void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                                 rd                          wr
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                                 \/     \/               \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<------------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
    thread->wakeup_ext_cb = NULL;
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this. An official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             struct thread_entry *owner)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        send->blocker.thread = owner;
        if(owner != NULL)
            q->blocker_p = &send->blocker;
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner;
}
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves the waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
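
/* Illustrative only (not part of the original source): a minimal sketch of
 * the usual event-loop pattern built on queue_init/queue_wait/queue_post.
 * The queue object and the event ID below are hypothetical. */
#if 0
static struct event_queue example_queue;
#define EXAMPLE_EV_DO_WORK 1

static void example_thread(void)
{
    struct queue_event ev;

    queue_init(&example_queue, true);    /* register so broadcasts reach it */

    while(1)
    {
        queue_wait(&example_queue, &ev); /* blocks until something is posted */

        switch(ev.id)
        {
        case EXAMPLE_EV_DO_WORK:
            /* ev.data carries the intptr_t that was passed to queue_post */
            break;
        default:
            /* system broadcast events arrive here as well */
            break;
        }
    }
}

/* From another thread or an IRQ handler:
 *     queue_post(&example_queue, EXAMPLE_EV_DO_WORK, 0);
 */
#endif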
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = cores[CURRENT_CORE].running;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(q->send && q->send->curr_sender) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
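
/* Illustrative only (not part of the original source): a rough sketch of the
 * synchronous send/reply round trip. The owning thread enables sending on
 * its queue, dequeues messages as usual and answers them with queue_reply;
 * queue_send in the client blocks until that reply arrives. All names are
 * hypothetical. */
#if 0
static struct event_queue example_q;
static struct queue_sender_list example_q_send;
#define EXAMPLE_EV_QUERY 1

static void example_owner_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true);
    queue_enable_queue_send(&example_q, &example_q_send,
                            thread_get_current());

    while(1)
    {
        queue_wait(&example_q, &ev);

        if(ev.id == EXAMPLE_EV_QUERY)
            queue_reply(&example_q, 42);  /* unblocks the sender below */
    }
}

static void example_client(void)
{
    intptr_t answer = queue_send(&example_q, EXAMPLE_EV_QUERY, 0);
    /* answer == 42 once the owner has processed the message */
    (void)answer;
}
#endif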
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to examine
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return i;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = 0;
    MUTEX_SET_THREAD(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(current == MUTEX_GET_THREAD(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(m->locked == 0)
    {
        /* lock is open */
        MUTEX_SET_THREAD(m, current);
        m->locked = 1;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only the owning thread may call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  thread_get_current()->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(m->queue == NULL)
    {
        /* no threads waiting - open the lock */
        MUTEX_SET_THREAD(m, NULL);
        m->locked = 0;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
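
/* Illustrative only (not part of the original source): the standard pattern
 * for guarding shared state with the mutex calls above. The mutex must be
 * initialized once before any thread can contend for it; names below are
 * hypothetical. */
#if 0
static struct mutex example_mtx;
static int example_shared_counter = 0;

static void example_setup(void)
{
    mutex_init(&example_mtx);        /* once, before other threads run */
}

static void example_update(void)
{
    mutex_lock(&example_mtx);        /* blocks while another thread owns it */
    example_shared_counter++;        /* critical section */
    mutex_unlock(&example_mtx);      /* owner releases; next waiter wakes */
}
#endif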
/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(l->thread == current)
    {
        /* current core already owns it */
        l->count++;
        return;
    }

    /* lock against other processor cores */
    corelock_lock(&l->cl);

    /* take ownership */
    l->thread = current;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == thread_get_current(),
                  "spinlock_unlock->wrong thread\n");

    if(l->count > 0)
    {
        /* this core still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    /* release lock */
    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
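
/* Illustrative only (not part of the original source): a brief sketch of the
 * spinlock calls guarding a small cross-core shared variable on multicore
 * targets. The names below are hypothetical. */
#if 0
#if NUM_CORES > 1
static struct spinlock example_sl;
static volatile int example_cross_core_flag;

static void example_sl_init(void)
{
    spinlock_init(&example_sl);
}

static void example_set_flag(int value)
{
    spinlock_lock(&example_sl);    /* keeps other cores out via the corelock */
    example_cross_core_flag = value;
    spinlock_unlock(&example_sl);
}
#endif
#endif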
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(--s->count >= 0)
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
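
/* Illustrative only (not part of the original source): a small sketch of a
 * counting semaphore handing work items from a producer to a consumer
 * thread. All names are hypothetical. */
#if 0
#ifdef HAVE_SEMAPHORE_OBJECTS
static struct semaphore example_sem;

static void example_sem_init(void)
{
    /* up to 16 outstanding items, none available initially */
    semaphore_init(&example_sem, 16, 0);
}

static void example_producer(void)
{
    /* ... enqueue an item somewhere ... */
    semaphore_release(&example_sem);   /* count++, wakes a blocked consumer */
}

static void example_consumer(void)
{
    while(1)
    {
        semaphore_wait(&example_sem);  /* blocks while the count is zero */
        /* ... dequeue and process one item ... */
    }
}
#endif
#endif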
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = 0;
    IF_COP( corelock_init(&w->cl); )
}
/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(w->signalled == 0 && timeout != TIMEOUT_NOBLOCK)
    {
        struct thread_entry * current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(w->signalled == 0)
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = 0; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = 1;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
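
/* Illustrative only (not part of the original source): a rough sketch of the
 * wakeup object bridging an interrupt handler and a waiting thread. Names
 * are hypothetical. */
#if 0
#ifdef HAVE_WAKEUP_OBJECTS
static struct wakeup example_wk;

static void example_wk_init(void)
{
    wakeup_init(&example_wk);          /* once, before use */
}

static void example_isr(void)
{
    /* safe from interrupt context: sets the flag and wakes any waiter */
    wakeup_signal(&example_wk);
}

static void example_wait_for_irq(void)
{
    /* wait up to one second for the ISR to signal */
    if(wakeup_wait(&example_wk, HZ) == OBJ_WAIT_TIMEDOUT)
    {
        /* no interrupt arrived within the timeout */
    }
}
#endif
#endif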