Use an API call instead of accessing a global variable to get the current thread.
[kugel-rb.git] / firmware / kernel.c
blob 27f3b0d08bb48b54b5cdde5697084eab746bdf1f
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#ifdef SIMULATOR
#include "system-sdl.h"
#include "debug.h"
#endif
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;

/****************************************************************************
 * Common utilities
 ****************************************************************************/

/* Find a pointer in a pointer array. Returns the address of the element if
 * found or the address of the terminating NULL otherwise. */
static void ** find_array_ptr(void **arr, void *ptr)
{
    void *curr;
    for(curr = *arr; curr != NULL && curr != ptr; curr = *(++arr));
    return arr;
}

/* Remove a pointer from a pointer array if it exists. Compacts it so that
 * no gaps exist. Returns 0 on success and -1 if the element wasn't found. */
static int remove_array_ptr(void **arr, void *ptr)
{
    void *curr;
    arr = find_array_ptr(arr, ptr);

    if(*arr == NULL)
        return -1;

    /* Found. Slide up following items. */
    do
    {
        void **arr1 = arr + 1;
        *arr++ = curr = *arr1;
    }
    while(curr != NULL);

    return 0;
}

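/* Illustrative sketch (not part of the original file): how the two helpers
 * above treat a NULL-terminated pointer array. The array and function names
 * below are hypothetical. */
#if 0
static void *example_arr[4]; /* zero-filled, so NULL-terminated; room for 3 */

static void example_array_usage(void *a, void *b)
{
    *find_array_ptr(example_arr, a) = a; /* slot was the terminating NULL */
    *find_array_ptr(example_arr, b) = b; /* append a second entry */
    remove_array_ptr(example_arr, a);    /* b slides up, no gap remains */
}
#endif
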
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}

/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}

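/* Illustrative sketch (not part of the original file): a tick task is a
 * plain void(void) function called from the timer interrupt once per tick,
 * so it must be short and must not block. Names are hypothetical. */
#if 0
static volatile long example_elapsed_ticks;

static void example_tick_task(void)
{
    example_elapsed_ticks++; /* runs in IRQ context every 1/HZ second */
}

static void example_tick_usage(void)
{
    tick_add_task(example_tick_task);    /* start counting ticks */
    sleep(HZ);                           /* let it run for about a second */
    tick_remove_task(example_tick_task); /* and stop it again */
}
#endif
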
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tmo_list;
    int rc = remove_array_ptr(arr, tmo);

    if(rc >= 0 && *arr == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*arr == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */

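/* Illustrative sketch (not part of the original file): registering a
 * timeout. A callback that returns 0 (or less) is cancelled; returning a
 * tick count re-arms it for that many ticks. Names are hypothetical. */
#if 0
static struct timeout example_tmo;

static int example_timeout_cb(struct timeout *tmo)
{
    /* tmo->data carries the value passed to timeout_register() */
    (void)tmo;
    return 0;   /* one-shot: do not re-arm */
    /* return HZ; would make it fire again in one second */
}

static void example_timeout_usage(void)
{
    timeout_register(&example_tmo, example_timeout_cb, HZ/2, 0);
    /* ... */
    timeout_cancel(&example_tmo); /* harmless if it already expired */
}
#endif
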
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    /* hacky.. */
    long sleep_ticks = current_tick + ticks + 1;
    while (TIME_BEFORE(current_tick, sleep_ticks))
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((defined(TATUNG_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                                  rd                     wr
 * q->events[]:          | XX |  E1  | E2 |  E3  | E4 |  XX  |
 * q->send->senders[]:   | NULL | T1 | T2 | NULL | T3 | NULL |
 *                                 \/    \/         \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<----->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
    thread->wakeup_ext_cb = NULL;
    thread->retval = 0;
}

/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}

/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(UNLIKELY(*spp))
        {
            queue_release_sender(spp, 0);
        }
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}

/* Moves a waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        if(*p == NULL)
        {
            /* Add it to the all_queues array */
            *p = q;
            corelock_unlock(&all_queues.cl);
        }
    }

    restore_irq(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

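/* Illustrative sketch (not part of the original file): the basic post/wait
 * pattern. The queue, the thread function and the event ID are hypothetical;
 * real code keeps its IDs in a shared header and initialises the queue
 * before any producer can post to it. */
#if 0
static struct event_queue example_q;
enum { EXAMPLE_EV_PING = 1 };

static void example_consumer_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true); /* register so queue_broadcast reaches it */

    while(1)
    {
        queue_wait(&example_q, &ev); /* blocks until something is posted */
        if(ev.id == EXAMPLE_EV_PING)
        {
            /* act on ev.data */
        }
    }
}

static void example_producer(void)
{
    queue_post(&example_q, EXAMPLE_EV_PING, 42); /* non-blocking */
}
#endif
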
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

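/* Illustrative sketch (not part of the original file): the synchronous
 * send/reply path. The owning thread enables sending on its queue; a client
 * then blocks in queue_send() until the owner calls queue_reply() (or until
 * the owner's next queue_wait auto-replies with 0). Names and the event ID
 * are hypothetical. */
#if 0
static struct event_queue example_sq;
static struct queue_sender_list example_sq_send;
enum { EXAMPLE_EV_QUERY = 1 };

static void example_owner_thread(void)
{
    struct queue_event ev;

    queue_init(&example_sq, true);
    /* owner id 0 works, but priority inheritance then stays disabled */
    queue_enable_queue_send(&example_sq, &example_sq_send, 0);

    while(1)
    {
        queue_wait(&example_sq, &ev);
        if(ev.id == EXAMPLE_EV_QUERY)
            queue_reply(&example_sq, 1); /* sender's queue_send returns 1 */
    }
}

static void example_client(void)
{
    intptr_t ok = queue_send(&example_sq, EXAMPLE_EV_QUERY, 0);
    (void)ok;
}
#endif
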
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = 0;
    MUTEX_SET_THREAD(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == MUTEX_GET_THREAD(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(LIKELY(m->locked == 0))
    {
        /* lock is open */
        MUTEX_SET_THREAD(m, current);
        m->locked = 1;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        MUTEX_SET_THREAD(m, NULL);
        m->locked = 0;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}

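/* Illustrative sketch (not part of the original file): a mutex guarding a
 * shared counter. Locks are recursive for the owning thread, so the nested
 * lock below is legal. Names are hypothetical. */
#if 0
static struct mutex example_mtx; /* mutex_init(&example_mtx) once at startup */
static int example_shared;

static void example_mutex_usage(void)
{
    mutex_lock(&example_mtx);
    example_shared++;

    mutex_lock(&example_mtx);   /* same owner: just bumps m->count */
    example_shared++;
    mutex_unlock(&example_mtx); /* drops the nested count */

    mutex_unlock(&example_mtx); /* actually releases the lock */
}
#endif
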
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */

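/* Illustrative sketch (not part of the original file): a semaphore used as
 * a completion signal between two threads, initialised with max 1 and an
 * initial count of 0. Names are hypothetical. */
#if 0
static struct semaphore example_sem; /* semaphore_init(&example_sem, 1, 0)
                                        once at startup */

static void example_waiter_thread(void)
{
    semaphore_wait(&example_sem);    /* blocks until released */
    /* the work signalled below has completed - continue */
}

static void example_signaller_thread(void)
{
    semaphore_release(&example_sem); /* wakes the waiter; count capped at max */
}
#endif
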
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = 0;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(w->signalled == 0 && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(w->signalled == 0))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = 0; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = 1;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */

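/* Illustrative sketch (not part of the original file): per the header above,
 * wakeup objects are the lightweight, IRQ-compatible way to signal a waiting
 * thread, e.g. from an interrupt handler. Names are hypothetical. */
#if 0
static struct wakeup example_wkup; /* wakeup_init(&example_wkup) at startup */

static void example_transfer_done_isr(void)
{
    wakeup_signal(&example_wkup); /* sets the flag and wakes the waiter */
}

static void example_driver_thread(void)
{
    /* wait up to one second for the ISR to signal completion */
    if(wakeup_wait(&example_wkup, HZ) == OBJ_WAIT_TIMEDOUT)
    {
        /* handle the timeout */
    }
}
#endif
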