Certain data accesses in the kernel should have volatile semantics to be correct...
[kugel-rb.git] / firmware / kernel.c
blob 41d1d00689a75c5e34b32bb4936dd5450b5d674f
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Björn Stenberg
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
21 #include <stdlib.h>
22 #include <string.h>
23 #include "config.h"
24 #include "kernel.h"
25 #include "thread.h"
26 #include "cpu.h"
27 #include "system.h"
28 #include "panic.h"
29 #include "debug.h"
30 #include "general.h"
32 /* Make this nonzero to enable more elaborate checks on objects */
33 #if defined(DEBUG) || defined(SIMULATOR)
34 #define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
35 #else
36 #define KERNEL_OBJECT_CHECKS 0
37 #endif
39 #if KERNEL_OBJECT_CHECKS
40 #ifdef SIMULATOR
41 #define KERNEL_ASSERT(exp, msg...) \
42 ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
43 #else
44 #define KERNEL_ASSERT(exp, msg...) \
45 ({ if (!({ exp; })) panicf(msg); })
46 #endif
47 #else
48 #define KERNEL_ASSERT(exp, msg...) ({})
49 #endif
51 #if !defined(CPU_PP) || !defined(BOOTLOADER)
52 volatile long current_tick SHAREDDATA_ATTR = 0;
53 #endif
55 /* List of tick tasks - final element always NULL for termination */
56 void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
58 /* This array holds all queues that are initiated. It is used for broadcast. */
59 static struct
61 struct event_queue *queues[MAX_NUM_QUEUES+1];
62 IF_COP( struct corelock cl; )
63 } all_queues SHAREDBSS_ATTR;
65 /****************************************************************************
66 * Standard kernel stuff
67 ****************************************************************************/
68 void kernel_init(void)
70 /* Init the threading API */
71 init_threads();
73 /* Other processors will not reach this point in a multicore build.
74 * In a single-core build with multiple cores they fall through and
75 * sleep in cop_main without returning. */
76 if (CURRENT_CORE == CPU)
78 memset(tick_funcs, 0, sizeof(tick_funcs));
79 memset(&all_queues, 0, sizeof(all_queues));
80 corelock_init(&all_queues.cl);
81 tick_start(1000/HZ);
82 #ifdef KDEV_INIT
83 kernel_device_init();
84 #endif
88 /****************************************************************************
89 * Timer tick - Timer initialization and interrupt handler is defined at
90 * the target level.
91 ****************************************************************************/
92 int tick_add_task(void (*f)(void))
94 int oldlevel = disable_irq_save();
95 void **arr = (void **)tick_funcs;
96 void **p = find_array_ptr(arr, f);
98 /* Add a task if there is room */
99 if(p - arr < MAX_NUM_TICK_TASKS)
101 *p = f; /* If already in list, no problem. */
103 else
105 panicf("Error! tick_add_task(): out of tasks");
108 restore_irq(oldlevel);
109 return 0;
112 int tick_remove_task(void (*f)(void))
114 int oldlevel = disable_irq_save();
115 int rc = remove_array_ptr((void **)tick_funcs, f);
116 restore_irq(oldlevel);
117 return rc;
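A minimal usage sketch (not part of the original file) for the tick task API above; the example_* names are hypothetical. Tick tasks run in interrupt context once per timer tick, so they must be short and must not block.

static volatile long example_tick_counter;   /* hypothetical shared counter */

static void example_tick_task(void)
{
    /* called once per tick from the timer interrupt - keep it minimal */
    example_tick_counter++;
}

static void example_tick_usage(void)
{
    tick_add_task(example_tick_task);    /* start receiving ticks */
    /* ... */
    tick_remove_task(example_tick_task); /* stop when no longer needed */
}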
120 /****************************************************************************
121 * Tick-based interval timers/one-shots - be mindful that this is not really
122 * intended for continuous timers but for events that only need to run for a
123 * short time and can then be cancelled without further software intervention.
124 ****************************************************************************/
125 #ifdef INCLUDE_TIMEOUT_API
126 /* list of active timeout events */
127 static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
129 /* timeout tick task - calls event handlers when they expire
130 * Event handlers may alter expiration, callback and data during operation.
132 static void timeout_tick(void)
134 unsigned long tick = current_tick;
135 struct timeout **p = tmo_list;
136 struct timeout *curr;
138 for(curr = *p; curr != NULL; curr = *(++p))
140 int ticks;
142 if(TIME_BEFORE(tick, curr->expires))
143 continue;
145 /* this event has expired - call callback */
146 ticks = curr->callback(curr);
147 if(ticks > 0)
149 curr->expires = tick + ticks; /* reload */
151 else
153 timeout_cancel(curr); /* cancel */
158 /* Cancels a timeout callback - can be called from the ISR */
159 void timeout_cancel(struct timeout *tmo)
161 int oldlevel = disable_irq_save();
162 int rc = remove_array_ptr((void **)tmo_list, tmo);
164 if(rc >= 0 && *tmo_list == NULL)
166 tick_remove_task(timeout_tick); /* Last one - remove task */
169 restore_irq(oldlevel);
172 /* Adds a timeout callback - calling with an active timeout resets the
173 interval - can be called from the ISR */
174 void timeout_register(struct timeout *tmo, timeout_cb_type callback,
175 int ticks, intptr_t data)
177 int oldlevel;
178 void **arr, **p;
180 if(tmo == NULL)
181 return;
183 oldlevel = disable_irq_save();
185 /* See if this one is already registered */
186 arr = (void **)tmo_list;
187 p = find_array_ptr(arr, tmo);
189 if(p - arr < MAX_NUM_TIMEOUTS)
191 /* Vacancy */
192 if(*p == NULL)
194 /* Not present */
195 if(*tmo_list == NULL)
197 tick_add_task(timeout_tick); /* First one - add task */
200 *p = tmo;
203 tmo->callback = callback;
204 tmo->data = data;
205 tmo->expires = current_tick + ticks;
208 restore_irq(oldlevel);
211 #endif /* INCLUDE_TIMEOUT_API */
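A minimal usage sketch (not part of the original file) for the timeout API above, assuming INCLUDE_TIMEOUT_API; the example_* names are hypothetical. The callback's return value decides what happens next: a positive tick count re-arms the timeout, anything else lets timeout_tick() cancel it.

static struct timeout example_tmo;      /* hypothetical timeout object */
static volatile bool example_expired;

static int example_timeout_cb(struct timeout *tmo)
{
    /* runs from the tick task when the timeout expires */
    example_expired = true;
    (void)tmo;
    return 0;      /* 0: do not reload - the timeout is cancelled */
    /* return HZ;     would instead re-arm it to fire again in one second */
}

static void example_timeout_usage(void)
{
    /* fire example_timeout_cb roughly half a second from now */
    timeout_register(&example_tmo, example_timeout_cb, HZ/2, 0);
    /* ... */
    timeout_cancel(&example_tmo);  /* harmless if it has already expired */
}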
213 /****************************************************************************
214 * Thread stuff
215 ****************************************************************************/
216 unsigned sleep(unsigned ticks)
218 #if defined(CPU_PP) && defined(BOOTLOADER)
219 unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
220 while (TIME_BEFORE(USEC_TIMER, stop))
221 switch_thread();
222 #elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
223 /* hacky.. */
224 long sleep_ticks = current_tick + ticks + 1;
225 while (TIME_BEFORE(current_tick, sleep_ticks))
226 switch_thread();
227 #else
228 disable_irq();
229 sleep_thread(ticks);
230 switch_thread();
231 #endif
232 return 0;
235 void yield(void)
237 #if ((defined(TATUNG_TPJ1022)) && defined(BOOTLOADER))
238 /* Some targets don't like yielding in the bootloader */
239 #else
240 switch_thread();
241 #endif
244 /****************************************************************************
245 * Queue handling stuff
246 ****************************************************************************/
248 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
249 /****************************************************************************
250 * Sender thread queue structure that aids implementation of priority
251 * inheritance on queues because the send list structure is the same as
252 * for all other kernel objects:
254 * Example state:
255 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
256 * E3 was posted with queue_post
257 * 4 events remain enqueued (E1-E4)
259 * rd wr
260 * q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
261 * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
262 * \/ \/ \/
263 * q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
264 * q->send->curr_sender: /\
266 * Thread has E0 in its own struct queue_event.
268 ****************************************************************************/
270 /* Puts the specified return value in the waiting thread's return value
271 * and wakes the thread.
273 * A sender should be confirmed to exist before calling, which makes it
274 * more efficient to reject the majority of cases that don't need this
275 * call.
277 static void queue_release_sender(struct thread_entry * volatile * sender,
278 intptr_t retval)
280 struct thread_entry *thread = *sender;
282 *sender = NULL; /* Clear slot. */
283 #ifdef HAVE_WAKEUP_EXT_CB
284 thread->wakeup_ext_cb = NULL; /* Clear callback. */
285 #endif
286 thread->retval = retval; /* Assign thread-local return value. */
287 *thread->bqp = thread; /* Move blocking queue head to thread since
288 wakeup_thread wakes the first thread in
289 the list. */
290 wakeup_thread(thread->bqp);
293 /* Releases any waiting threads that are queued with queue_send -
294 * reply with 0.
296 static void queue_release_all_senders(struct event_queue *q)
298 if(q->send)
300 unsigned int i;
301 for(i = q->read; i != q->write; i++)
303 struct thread_entry **spp =
304 &q->send->senders[i & QUEUE_LENGTH_MASK];
306 if(*spp)
308 queue_release_sender(spp, 0);
314 /* Callback to do extra forced removal steps from sender list in addition
315 * to the normal blocking queue removal and priority dis-inherit */
316 static void queue_remove_sender_thread_cb(struct thread_entry *thread)
318 *((struct thread_entry **)thread->retval) = NULL;
319 #ifdef HAVE_WAKEUP_EXT_CB
320 thread->wakeup_ext_cb = NULL;
321 #endif
322 thread->retval = 0;
325 /* Enables queue_send on the specified queue - caller allocates the extra
326 * data structure. Only queues which are taken to be owned by a thread should
327 * enable this; an official owner is not compulsory, but one must be
328 * specified for priority inheritance to operate.
330 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
331 * messages results in an undefined order of message replies or possible default
332 * replies if two or more waits happen before a reply is done.
334 void queue_enable_queue_send(struct event_queue *q,
335 struct queue_sender_list *send,
336 unsigned int owner_id)
338 int oldlevel = disable_irq_save();
339 corelock_lock(&q->cl);
341 if(send != NULL && q->send == NULL)
343 memset(send, 0, sizeof(*send));
344 #ifdef HAVE_PRIORITY_SCHEDULING
345 send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
346 send->blocker.priority = PRIORITY_IDLE;
347 if(owner_id != 0)
349 send->blocker.thread = thread_id_entry(owner_id);
350 q->blocker_p = &send->blocker;
352 #endif
353 q->send = send;
356 corelock_unlock(&q->cl);
357 restore_irq(oldlevel);
359 (void)owner_id;
362 /* Unblock a blocked thread at a given event index */
363 static inline void queue_do_unblock_sender(struct queue_sender_list *send,
364 unsigned int i)
366 if(send)
368 struct thread_entry **spp = &send->senders[i];
370 if(UNLIKELY(*spp))
372 queue_release_sender(spp, 0);
377 /* Perform the auto-reply sequence */
378 static inline void queue_do_auto_reply(struct queue_sender_list *send)
380 if(send && send->curr_sender)
382 /* auto-reply */
383 queue_release_sender(&send->curr_sender, 0);
387 /* Moves the waiting thread's reference from the senders array to
388 * curr_sender, which represents the thread waiting for a response to the
389 * last message removed from the queue. This also protects the thread from
390 * being bumped due to overflow which would not be a valid action since its
391 * message _is_ being processed at this point. */
392 static inline void queue_do_fetch_sender(struct queue_sender_list *send,
393 unsigned int rd)
395 if(send)
397 struct thread_entry **spp = &send->senders[rd];
399 if(*spp)
401 /* Move thread reference from array to the next thread
402 that queue_reply will release */
403 send->curr_sender = *spp;
404 (*spp)->retval = (intptr_t)spp;
405 *spp = NULL;
407 /* else message was posted asynchronously with queue_post */
410 #else
411 /* Empty macros for when synchronous sending is not enabled */
412 #define queue_release_all_senders(q)
413 #define queue_do_unblock_sender(send, i)
414 #define queue_do_auto_reply(send)
415 #define queue_do_fetch_sender(send, rd)
416 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
418 /* Queue must not be available for use during this call */
419 void queue_init(struct event_queue *q, bool register_queue)
421 int oldlevel = disable_irq_save();
423 if(register_queue)
425 corelock_lock(&all_queues.cl);
428 corelock_init(&q->cl);
429 q->queue = NULL;
430 /* Whatever garbage is in write is irrelevant because of the masking design -
431 * any other functions that empty the queue do this as well so that
432 * queue_count and queue_empty return sane values in the case of a
433 * concurrent change without locking inside them. */
434 q->read = q->write;
435 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
436 q->send = NULL; /* No message sending by default */
437 IF_PRIO( q->blocker_p = NULL; )
438 #endif
440 if(register_queue)
442 void **queues = (void **)all_queues.queues;
443 void **p = find_array_ptr(queues, q);
445 if(p - queues >= MAX_NUM_QUEUES)
447 panicf("queue_init->out of queues");
450 if(*p == NULL)
452 /* Add it to the all_queues array */
453 *p = q;
454 corelock_unlock(&all_queues.cl);
458 restore_irq(oldlevel);
461 /* Queue must not be available for use during this call */
462 void queue_delete(struct event_queue *q)
464 int oldlevel = disable_irq_save();
465 corelock_lock(&all_queues.cl);
466 corelock_lock(&q->cl);
468 /* Remove the queue if registered */
469 remove_array_ptr((void **)all_queues.queues, q);
471 corelock_unlock(&all_queues.cl);
473 /* Release thread(s) waiting on queue head */
474 thread_queue_wake(&q->queue);
476 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
477 if(q->send)
479 /* Release threads waiting for replies */
480 queue_release_all_senders(q);
482 /* Reply to any dequeued message waiting for one */
483 queue_do_auto_reply(q->send);
485 q->send = NULL;
486 IF_PRIO( q->blocker_p = NULL; )
488 #endif
490 q->read = q->write;
492 corelock_unlock(&q->cl);
493 restore_irq(oldlevel);
496 /* NOTE: multiple threads waiting on a queue head cannot have a well-
497 defined release order if timeouts are used. If multiple threads must
498 access the queue head, use a dispatcher or queue_wait only. */
499 void queue_wait(struct event_queue *q, struct queue_event *ev)
501 int oldlevel;
502 unsigned int rd;
504 #ifdef HAVE_PRIORITY_SCHEDULING
505 KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
506 QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
507 "queue_wait->wrong thread\n");
508 #endif
510 oldlevel = disable_irq_save();
511 corelock_lock(&q->cl);
513 /* auto-reply */
514 queue_do_auto_reply(q->send);
516 while(1)
518 struct thread_entry *current;
520 rd = q->read;
521 if (rd != q->write) /* A waking message could disappear */
522 break;
524 current = thread_id_entry(THREAD_ID_CURRENT);
526 IF_COP( current->obj_cl = &q->cl; )
527 current->bqp = &q->queue;
529 block_thread(current);
531 corelock_unlock(&q->cl);
532 switch_thread();
534 oldlevel = disable_irq_save();
535 corelock_lock(&q->cl);
538 q->read = rd + 1;
539 rd &= QUEUE_LENGTH_MASK;
540 *ev = q->events[rd];
542 /* Get data for a waiting thread if one */
543 queue_do_fetch_sender(q->send, rd);
545 corelock_unlock(&q->cl);
546 restore_irq(oldlevel);
549 void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
551 int oldlevel;
552 unsigned int rd, wr;
554 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
555 KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
556 QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
557 "queue_wait_w_tmo->wrong thread\n");
558 #endif
560 oldlevel = disable_irq_save();
561 corelock_lock(&q->cl);
563 /* Auto-reply */
564 queue_do_auto_reply(q->send);
566 rd = q->read;
567 wr = q->write;
568 if (rd == wr && ticks > 0)
570 struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
572 IF_COP( current->obj_cl = &q->cl; )
573 current->bqp = &q->queue;
575 block_thread_w_tmo(current, ticks);
576 corelock_unlock(&q->cl);
578 switch_thread();
580 oldlevel = disable_irq_save();
581 corelock_lock(&q->cl);
583 rd = q->read;
584 wr = q->write;
587 /* no need to worry about a removed message here - status is checked inside
588 the locks - just check whether this was a timeout or a false alarm */
589 if (rd != wr)
591 q->read = rd + 1;
592 rd &= QUEUE_LENGTH_MASK;
593 *ev = q->events[rd];
594 /* Get data for a waiting thread if one */
595 queue_do_fetch_sender(q->send, rd);
597 else
599 ev->id = SYS_TIMEOUT;
602 corelock_unlock(&q->cl);
603 restore_irq(oldlevel);
606 void queue_post(struct event_queue *q, long id, intptr_t data)
608 int oldlevel;
609 unsigned int wr;
611 oldlevel = disable_irq_save();
612 corelock_lock(&q->cl);
614 wr = q->write++ & QUEUE_LENGTH_MASK;
616 q->events[wr].id = id;
617 q->events[wr].data = data;
619 /* overflow protect - unblock any thread waiting at this index */
620 queue_do_unblock_sender(q->send, wr);
622 /* Wakeup a waiting thread if any */
623 wakeup_thread(&q->queue);
625 corelock_unlock(&q->cl);
626 restore_irq(oldlevel);
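A minimal producer/consumer sketch (not part of the original file) for the queue API above; the example_* names and the event id are hypothetical.

#define EXAMPLE_EV_DO_WORK 1            /* hypothetical event id */

static struct event_queue example_q;

static void example_consumer_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true);       /* init before anything posts to it;
                                           registering enables broadcasts */
    while(1)
    {
        /* wake for an event or at least once per second */
        queue_wait_w_tmo(&example_q, &ev, HZ);

        switch(ev.id)
        {
        case EXAMPLE_EV_DO_WORK:
            /* handle ev.data */
            break;
        case SYS_TIMEOUT:
            /* periodic housekeeping */
            break;
        }
    }
}

/* producer side - unlike queue_send below, queue_post never blocks the caller */
static void example_producer(intptr_t data)
{
    queue_post(&example_q, EXAMPLE_EV_DO_WORK, data);
}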
629 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
630 /* IRQ handlers are not allowed to use this function - interrupts are only
631 disabled here to protect the queue's integrity. */
632 intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
634 int oldlevel;
635 unsigned int wr;
637 oldlevel = disable_irq_save();
638 corelock_lock(&q->cl);
640 wr = q->write++ & QUEUE_LENGTH_MASK;
642 q->events[wr].id = id;
643 q->events[wr].data = data;
645 if(LIKELY(q->send))
647 struct queue_sender_list *send = q->send;
648 struct thread_entry **spp = &send->senders[wr];
649 struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
651 if(UNLIKELY(*spp))
653 /* overflow protect - unblock any thread waiting at this index */
654 queue_release_sender(spp, 0);
657 /* Wakeup a waiting thread if any */
658 wakeup_thread(&q->queue);
660 /* Save thread in slot, add to list and wait for reply */
661 *spp = current;
662 IF_COP( current->obj_cl = &q->cl; )
663 IF_PRIO( current->blocker = q->blocker_p; )
664 #ifdef HAVE_WAKEUP_EXT_CB
665 current->wakeup_ext_cb = queue_remove_sender_thread_cb;
666 #endif
667 current->retval = (intptr_t)spp;
668 current->bqp = &send->list;
670 block_thread(current);
672 corelock_unlock(&q->cl);
673 switch_thread();
675 return current->retval;
678 /* Function as queue_post if sending is not enabled */
679 wakeup_thread(&q->queue);
681 corelock_unlock(&q->cl);
682 restore_irq(oldlevel);
684 return 0;
687 #if 0 /* not used now but probably will be later */
688 /* Query if the last message dequeued was added by queue_send or not */
689 bool queue_in_queue_send(struct event_queue *q)
691 bool in_send;
693 #if NUM_CORES > 1
694 int oldlevel = disable_irq_save();
695 corelock_lock(&q->cl);
696 #endif
698 in_send = q->send && q->send->curr_sender;
700 #if NUM_CORES > 1
701 corelock_unlock(&q->cl);
702 restore_irq(oldlevel);
703 #endif
705 return in_send;
707 #endif
709 /* Replies with retval to the last dequeued message sent with queue_send */
710 void queue_reply(struct event_queue *q, intptr_t retval)
712 if(q->send && q->send->curr_sender)
714 struct queue_sender_list *sender;
716 int oldlevel = disable_irq_save();
717 corelock_lock(&q->cl);
719 sender = q->send;
721 /* Double-check locking */
722 if(LIKELY(sender && sender->curr_sender))
723 queue_release_sender(&sender->curr_sender, retval);
725 corelock_unlock(&q->cl);
726 restore_irq(oldlevel);
729 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
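A minimal synchronous-messaging sketch (not part of the original file), assuming HAVE_EXTENDED_MESSAGING_AND_NAME; the example_* names and the event id are hypothetical. Passing a real owner thread id to queue_enable_queue_send() instead of 0 is what allows priority inheritance to operate on priority builds.

#define EXAMPLE_EV_QUERY 2                      /* hypothetical event id */

static struct event_queue example_sq;
static struct queue_sender_list example_sq_send;

/* owning thread: enables synchronous sends and replies to them explicitly */
static void example_owner_thread(void)
{
    struct queue_event ev;

    queue_init(&example_sq, true);
    queue_enable_queue_send(&example_sq, &example_sq_send,
                            0 /* no official owner in this sketch */);

    while(1)
    {
        queue_wait(&example_sq, &ev);

        if(ev.id == EXAMPLE_EV_QUERY)
            queue_reply(&example_sq, 42);       /* value returned to sender */
        /* a sent message not answered with queue_reply gets an auto-reply
         * of 0 on the next queue_wait()/queue_wait_w_tmo() */
    }
}

/* client thread: blocks until the owner replies (or auto-replies) */
static intptr_t example_client(void)
{
    return queue_send(&example_sq, EXAMPLE_EV_QUERY, 0);
}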
731 bool queue_peek(struct event_queue *q, struct queue_event *ev)
733 unsigned int rd;
735 if(q->read == q->write)
736 return false;
738 bool have_msg = false;
740 int oldlevel = disable_irq_save();
741 corelock_lock(&q->cl);
743 rd = q->read;
744 if(rd != q->write)
746 *ev = q->events[rd & QUEUE_LENGTH_MASK];
747 have_msg = true;
750 corelock_unlock(&q->cl);
751 restore_irq(oldlevel);
753 return have_msg;
756 /* Poll the queue to see if a message exists - be careful using the result if
757 * queue_remove_from_head is called while messages are posted - possibly use
758 * queue_wait_w_tmo(&q, 0) in that case, or else a removed message that
759 * unsignals the queue may cause an unwanted block */
760 bool queue_empty(const struct event_queue* q)
762 return ( q->read == q->write );
765 void queue_clear(struct event_queue* q)
767 int oldlevel;
769 oldlevel = disable_irq_save();
770 corelock_lock(&q->cl);
772 /* Release all threads waiting in the queue for a reply -
773 dequeued sent message will be handled by owning thread */
774 queue_release_all_senders(q);
776 q->read = q->write;
778 corelock_unlock(&q->cl);
779 restore_irq(oldlevel);
782 void queue_remove_from_head(struct event_queue *q, long id)
784 int oldlevel;
786 oldlevel = disable_irq_save();
787 corelock_lock(&q->cl);
789 while(q->read != q->write)
791 unsigned int rd = q->read & QUEUE_LENGTH_MASK;
793 if(q->events[rd].id != id)
795 break;
798 /* Release any thread waiting on this message */
799 queue_do_unblock_sender(q->send, rd);
801 q->read++;
804 corelock_unlock(&q->cl);
805 restore_irq(oldlevel);
809 * The number of events waiting in the queue.
811 * @param q pointer to the event_queue
812 * @return number of events in the queue
814 int queue_count(const struct event_queue *q)
816 return q->write - q->read;
819 int queue_broadcast(long id, intptr_t data)
821 struct event_queue **p = all_queues.queues;
822 struct event_queue *q;
824 #if NUM_CORES > 1
825 int oldlevel = disable_irq_save();
826 corelock_lock(&all_queues.cl);
827 #endif
829 for(q = *p; q != NULL; q = *(++p))
831 queue_post(q, id, data);
834 #if NUM_CORES > 1
835 corelock_unlock(&all_queues.cl);
836 restore_irq(oldlevel);
837 #endif
839 return p - all_queues.queues;
842 /****************************************************************************
843 * Simple mutex functions ;)
844 ****************************************************************************/
846 static inline void __attribute__((always_inline))
847 mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
849 #ifdef HAVE_PRIORITY_SCHEDULING
850 mtx->blocker.thread = td;
851 #else
852 mtx->thread = td;
853 #endif
856 static inline struct thread_entry * __attribute__((always_inline))
857 mutex_get_thread(volatile struct mutex *mtx)
859 #ifdef HAVE_PRIORITY_SCHEDULING
860 return mtx->blocker.thread;
861 #else
862 return mtx->thread;
863 #endif
866 /* Initialize a mutex object - call before any use and do not call again once
867 * the object is available to other threads */
868 void mutex_init(struct mutex *m)
870 corelock_init(&m->cl);
871 m->queue = NULL;
872 m->recursion = 0;
873 mutex_set_thread(m, NULL);
874 #ifdef HAVE_PRIORITY_SCHEDULING
875 m->blocker.priority = PRIORITY_IDLE;
876 m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
877 m->no_preempt = false;
878 #endif
881 /* Gain ownership of a mutex object or block until it becomes free */
882 void mutex_lock(struct mutex *m)
884 struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
886 if(current == mutex_get_thread(m))
888 /* current thread already owns this mutex */
889 m->recursion++;
890 return;
893 /* lock out other cores */
894 corelock_lock(&m->cl);
896 /* must read thread again inside cs (a multiprocessor concern really) */
897 if(LIKELY(mutex_get_thread(m) == NULL))
899 /* lock is open */
900 mutex_set_thread(m, current);
901 corelock_unlock(&m->cl);
902 return;
905 /* block until the lock is open... */
906 IF_COP( current->obj_cl = &m->cl; )
907 IF_PRIO( current->blocker = &m->blocker; )
908 current->bqp = &m->queue;
910 disable_irq();
911 block_thread(current);
913 corelock_unlock(&m->cl);
915 /* ...and turn control over to next thread */
916 switch_thread();
919 /* Release ownership of a mutex object - only owning thread must call this */
920 void mutex_unlock(struct mutex *m)
922 /* unlocker not being the owner is an unlocking violation */
923 KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
924 "mutex_unlock->wrong thread (%s != %s)\n",
925 mutex_get_thread(m)->name,
926 thread_id_entry(THREAD_ID_CURRENT)->name);
928 if(m->recursion > 0)
930 /* this thread still owns lock */
931 m->recursion--;
932 return;
935 /* lock out other cores */
936 corelock_lock(&m->cl);
938 /* transfer to next queued thread if any */
939 if(LIKELY(m->queue == NULL))
941 /* no threads waiting - open the lock */
942 mutex_set_thread(m, NULL);
943 corelock_unlock(&m->cl);
944 return;
946 else
948 const int oldlevel = disable_irq_save();
949 /* Transfer of the owning thread is handled in the wakeup protocol
950 * if priorities are enabled, otherwise it is just set from the
951 * queue head. */
952 IFN_PRIO( mutex_set_thread(m, m->queue); )
953 IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
954 restore_irq(oldlevel);
956 corelock_unlock(&m->cl);
958 #ifdef HAVE_PRIORITY_SCHEDULING
959 if((result & THREAD_SWITCH) && !m->no_preempt)
960 switch_thread();
961 #endif
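A minimal usage sketch (not part of the original file) for the mutex above; the example_* names are hypothetical. As the code above shows, mutex_lock() is recursive for the owning thread and only the owner may call mutex_unlock().

static struct mutex example_mtx;
static int example_shared_counter;

static void example_mutex_setup(void)
{
    mutex_init(&example_mtx);   /* once, before other threads can use it */
}

static void example_mutex_user(void)
{
    mutex_lock(&example_mtx);   /* blocks until the lock becomes free */
    example_shared_counter++;   /* critical section */
    mutex_unlock(&example_mtx);
}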
965 /****************************************************************************
966 * Simple semaphore functions ;)
967 ****************************************************************************/
968 #ifdef HAVE_SEMAPHORE_OBJECTS
969 void semaphore_init(struct semaphore *s, int max, int start)
971 KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
972 "semaphore_init->inv arg\n");
973 s->queue = NULL;
974 s->max = max;
975 s->count = start;
976 corelock_init(&s->cl);
979 void semaphore_wait(struct semaphore *s)
981 struct thread_entry *current;
983 corelock_lock(&s->cl);
985 if(LIKELY(--s->count >= 0))
987 /* wait satisfied */
988 corelock_unlock(&s->cl);
989 return;
992 /* too many waits - block until dequeued... */
993 current = thread_id_entry(THREAD_ID_CURRENT);
995 IF_COP( current->obj_cl = &s->cl; )
996 current->bqp = &s->queue;
998 disable_irq();
999 block_thread(current);
1001 corelock_unlock(&s->cl);
1003 /* ...and turn control over to next thread */
1004 switch_thread();
1007 void semaphore_release(struct semaphore *s)
1009 IF_PRIO( unsigned int result = THREAD_NONE; )
1011 corelock_lock(&s->cl);
1013 if(s->count < s->max && ++s->count <= 0)
1015 /* there should be threads in this queue */
1016 KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
1017 /* a thread was queued - wake it up */
1018 int oldlevel = disable_irq_save();
1019 IF_PRIO( result = ) wakeup_thread(&s->queue);
1020 restore_irq(oldlevel);
1023 corelock_unlock(&s->cl);
1025 #ifdef HAVE_PRIORITY_SCHEDULING
1026 if(result & THREAD_SWITCH)
1027 switch_thread();
1028 #endif
1030 #endif /* HAVE_SEMAPHORE_OBJECTS */
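A minimal usage sketch (not part of the original file) for the semaphore above, assuming HAVE_SEMAPHORE_OBJECTS; the example_* names are hypothetical. The count never rises above the max given to semaphore_init(), and waiters block whenever no count is available.

static struct semaphore example_sem;

static void example_sem_setup(void)
{
    semaphore_init(&example_sem, 8, 0); /* max of 8, nothing available yet */
}

static void example_sem_producer(void)
{
    /* ... make one item available ... */
    semaphore_release(&example_sem);    /* wakes a waiter if one is blocked */
}

static void example_sem_consumer(void)
{
    semaphore_wait(&example_sem);       /* blocks until an item is available */
    /* ... consume one item ... */
}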
1032 #ifdef HAVE_WAKEUP_OBJECTS
1033 /****************************************************************************
1034 * Lightweight IRQ-compatible wakeup object
1037 /* Initialize the wakeup object */
1038 void wakeup_init(struct wakeup *w)
1040 w->queue = NULL;
1041 w->signalled = false;
1042 IF_COP( corelock_init(&w->cl); )
1045 /* Wait for a signal blocking indefinitely or for a specified period */
1046 int wakeup_wait(struct wakeup *w, int timeout)
1048 int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
1049 int oldlevel = disable_irq_save();
1051 corelock_lock(&w->cl);
1053 if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
1055 struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);
1057 IF_COP( current->obj_cl = &w->cl; )
1058 current->bqp = &w->queue;
1060 if (timeout != TIMEOUT_BLOCK)
1061 block_thread_w_tmo(current, timeout);
1062 else
1063 block_thread(current);
1065 corelock_unlock(&w->cl);
1066 switch_thread();
1068 oldlevel = disable_irq_save();
1069 corelock_lock(&w->cl);
1072 if(UNLIKELY(!w->signalled))
1074 /* Timed-out or failed */
1075 ret = (timeout != TIMEOUT_BLOCK) ?
1076 OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
1079 w->signalled = false; /* Reset */
1081 corelock_unlock(&w->cl);
1082 restore_irq(oldlevel);
1084 return ret;
1087 /* Signal the waiting thread, or leave the signal set if the thread hasn't
1088 * waited yet.
1090 * returns THREAD_NONE or THREAD_OK
1092 int wakeup_signal(struct wakeup *w)
1094 int oldlevel = disable_irq_save();
1095 int ret;
1097 corelock_lock(&w->cl);
1099 w->signalled = true;
1100 ret = wakeup_thread(&w->queue);
1102 corelock_unlock(&w->cl);
1103 restore_irq(oldlevel);
1105 return ret;
1107 #endif /* HAVE_WAKEUP_OBJECTS */
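A minimal usage sketch (not part of the original file) for the wakeup object above, assuming HAVE_WAKEUP_OBJECTS; the example_* names are hypothetical. A typical use is a thread waiting for a completion that an interrupt handler signals.

static struct wakeup example_wkup;

static void example_wakeup_setup(void)
{
    wakeup_init(&example_wkup);
}

/* thread side: start an operation, then wait up to one second for the ISR */
static bool example_wait_for_completion(void)
{
    /* ... start the hardware operation ... */
    return wakeup_wait(&example_wkup, HZ) == OBJ_WAIT_SUCCEEDED;
}

/* interrupt side: wakeup_signal() is IRQ-compatible */
static void example_completion_isr(void)
{
    wakeup_signal(&example_wkup);
}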