/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#include "debug.h"
#include "general.h"

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
    defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif

#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock cl;
#endif
} all_queues SHAREDBSS_ATTR;

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}

/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
        return -1;
    }

    restore_irq(oldlevel);
    return 0;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}
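
/* Usage sketch (illustrative only, not built): registering a tick task.
 * Only tick_add_task/tick_remove_task come from this file; the example_*
 * names are hypothetical. Tick tasks run from the timer tick (interrupt)
 * context once per kernel tick, so they must be short and must not block. */
#if 0
static volatile long example_tick_count = 0;

static void example_tick_task(void)
{
    example_tick_count++; /* called once per kernel tick */
}

static void example_use_tick_task(void)
{
    tick_add_task(example_tick_task);    /* start counting ticks */
    /* ... */
    tick_remove_task(example_tick_task); /* stop counting ticks */
}
#endif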

/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*tmo_list == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
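
/* Usage sketch (illustrative only, not built; requires INCLUDE_TIMEOUT_API):
 * a timeout that fires, re-arms itself twice and then stops. The example_*
 * names are hypothetical; the callback convention (return 0 to stop, or a
 * positive tick count to reload) follows timeout_tick() above. */
#if 0
static int example_timeout_cb(struct timeout *tmo)
{
    /* tmo->data carries the intptr_t argument given to timeout_register */
    if(tmo->data-- > 0)
        return HZ; /* run again in roughly one second */

    return 0;      /* done - timeout_tick will cancel this timeout */
}

static struct timeout example_tmo;

static void example_arm_timeout(void)
{
    /* fire first after HZ/2 ticks, then twice more at HZ intervals */
    timeout_register(&example_tmo, example_timeout_cb, HZ/2, 2);
}
#endif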

/****************************************************************************
 * Thread stuff
 ****************************************************************************/
unsigned sleep(unsigned ticks)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (SLEEP_KERNEL_HOOK(ticks))
        return 0; /* Handled */

    disable_irq();
    sleep_thread(ticks);
    switch_thread();
    return 0;
}

void yield(void)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (YIELD_KERNEL_HOOK())
        return; /* handled */

    switch_thread();
}
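
/* Usage sketch (illustrative only, not built): a polling thread body built
 * on sleep() and yield(). example_poll_hardware is a hypothetical helper. */
#if 0
static bool example_poll_hardware(void); /* hypothetical - returns true if it did work */

static void example_poll_thread(void)
{
    while(1)
    {
        if(example_poll_hardware())
            yield();       /* did some work - just give other threads a turn */
        else
            sleep(HZ/10);  /* idle - give up the CPU for about 100 ms */
    }
}
#endif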

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *
 * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:  /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry * volatile * sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}

/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}
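
/* Usage sketch (illustrative only, not built): a queue owned by one thread
 * with synchronous sending enabled. The example_* names are hypothetical;
 * the caller allocates both the queue and the sender list, as described in
 * the comment above. */
#if 0
static struct event_queue example_queue;
static struct queue_sender_list example_queue_senders;

static void example_queue_setup(unsigned int owner_thread_id)
{
    queue_init(&example_queue, true);
    queue_enable_queue_send(&example_queue, &example_queue_senders,
                            owner_thread_id);
}
#endif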

/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}

/* Moves waiting thread's reference from the senders array to the
 * current_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow, which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    /* What garbage is in write is irrelevant because of the masking design -
     * any other functions that empty the queue do this as well so that
     * queue_count and queue_empty return sane values in the case of a
     * concurrent change without locking inside them. */
    q->read = q->write;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        /* Add it to the all_queues array */
        *p = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    while(1)
    {
        struct thread_entry *current;

        rd = q->read;
        if (rd != q->write) /* A waking message could disappear */
            break;

        current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    q->read = rd + 1;
    rd &= QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;
    unsigned int rd, wr;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    rd = q->read;
    wr = q->write;
    if (rd == wr && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        rd = q->read;
        wr = q->write;
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (rd != wr)
    {
        q->read = rd + 1;
        rd &= QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_post ovf q=%08lX", (long)q);

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
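
/* Usage sketch (illustrative only, not built): the basic post/wait pattern.
 * One thread owns the queue and waits on it; other contexts post events.
 * The example_* names and event IDs are hypothetical. */
#if 0
enum { EXAMPLE_EV_REFRESH = 1 };

static struct event_queue example_q;

static void example_owner_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true);

    while(1)
    {
        queue_wait(&example_q, &ev); /* blocks until something is posted */

        switch(ev.id)
        {
        case EXAMPLE_EV_REFRESH:
            /* ev.data carries the intptr_t passed to queue_post */
            break;
        }
    }
}

static void example_poster(void)
{
    /* post an event; the owner thread dequeues it via queue_wait */
    queue_post(&example_q, EXAMPLE_EV_REFRESH, 0);
}
#endif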

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_send ovf q=%08lX", (long)q);

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        struct queue_sender_list *sender;

        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        sender = q->send;

        /* Double-check locking */
        if(LIKELY(sender && sender->curr_sender))
            queue_release_sender(&sender->curr_sender, retval);

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
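
/* Usage sketch (illustrative only, not built; requires
 * HAVE_EXTENDED_MESSAGING_AND_NAME and queue_enable_queue_send): the
 * synchronous send/reply round trip. The sender blocks in queue_send until
 * the owning thread replies; queue_wait's auto-reply returns a default 0 if
 * the owner dequeues the next message without replying. The example_* names
 * and IDs are hypothetical. */
#if 0
enum { EXAMPLE_EV_QUERY = 1 };

static struct event_queue example_q;

static void example_owner_loop(void)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait(&example_q, &ev);

        if(ev.id == EXAMPLE_EV_QUERY)
            queue_reply(&example_q, 42); /* unblocks the sender with 42 */
    }
}

static intptr_t example_client_query(void)
{
    /* Blocks until example_owner_loop replies (or auto-reply returns 0) */
    return queue_send(&example_q, EXAMPLE_EV_QUERY, 0);
}
#endif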

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;

    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    rd = q->read;
    if(rd != q->write)
    {
        *ev = q->events[rd & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
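
/* Usage sketch (illustrative only, not built): polling without risking a
 * block. As the comment above warns, pairing queue_empty with
 * queue_remove_from_head can race with new posts; queue_wait_w_tmo with a
 * zero timeout checks and dequeues under the queue lock instead. The
 * example_* name is hypothetical. */
#if 0
static bool example_try_get_event(struct event_queue *q,
                                  struct queue_event *ev)
{
    queue_wait_w_tmo(q, ev, 0);   /* returns immediately */
    return ev->id != SYS_TIMEOUT; /* SYS_TIMEOUT means the queue was empty */
}
#endif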

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to inspect
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->recursion = 0;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(mutex_get_thread(m) == NULL))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
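
/* Usage sketch (illustrative only, not built): guarding shared state with a
 * mutex. mutex_lock may be called again by the owning thread (the recursion
 * counter above handles that); each lock must be paired with an unlock. The
 * example_* names are hypothetical. */
#if 0
static struct mutex example_lock;
static int example_shared_counter;

static void example_mutex_setup(void)
{
    mutex_init(&example_lock);
}

static void example_bump_counter(void)
{
    mutex_lock(&example_lock);   /* blocks if another thread owns it */
    example_shared_counter++;
    mutex_unlock(&example_lock); /* wakes the next queued thread, if any */
}
#endif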

/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
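
/* Usage sketch (illustrative only, not built; requires
 * HAVE_SEMAPHORE_OBJECTS): a counting semaphore used as a producer/consumer
 * item count, created empty with room for 16 items. The example_* names are
 * hypothetical. */
#if 0
static struct semaphore example_items;

static void example_sem_setup(void)
{
    semaphore_init(&example_items, 16, 0); /* max 16, start with 0 */
}

static void example_producer(void)
{
    /* ... add an item to some buffer ... */
    semaphore_release(&example_items);     /* wake one waiting consumer */
}

static void example_consumer(void)
{
    semaphore_wait(&example_items);        /* blocks while the count is zero */
    /* ... remove an item from the buffer ... */
}
#endif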

#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = false;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(!w->signalled))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = false; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = true;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
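
/* Usage sketch (illustrative only, not built; requires HAVE_WAKEUP_OBJECTS):
 * an interrupt handler flagging completion to a waiting thread. The wakeup
 * object is IRQ-compatible per the section comment; wakeup_wait either
 * consumes a pending signal or blocks until one arrives or the timeout
 * expires. The example_* names are hypothetical. */
#if 0
static struct wakeup example_done;

static void example_wakeup_setup(void)
{
    wakeup_init(&example_done);
}

static void example_isr(void)
{
    wakeup_signal(&example_done);          /* leave or deliver the signal */
}

static int example_wait_for_isr(void)
{
    /* OBJ_WAIT_SUCCEEDED, OBJ_WAIT_TIMEDOUT or OBJ_WAIT_FAILED */
    return wakeup_wait(&example_done, HZ); /* wait up to one second */
}
#endif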