/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#include "debug.h"
#include "general.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
    defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif
/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif

#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and the interrupt handler are defined
 * at the processor/model level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}
int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}
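
/* Illustrative sketch (not part of the original file; names are
 * hypothetical): a tick task runs in interrupt context on every timer tick,
 * so it must be short and must never block.
 *
 *   static volatile long seconds_elapsed = 0;
 *
 *   static void second_counter_tick(void)
 *   {
 *       if ((current_tick % HZ) == 0)   // roughly once per second
 *           seconds_elapsed++;
 *   }
 *
 *   tick_add_task(second_counter_tick);    // begin periodic callbacks
 *   ...
 *   tick_remove_task(second_counter_tick); // stop them again
 */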
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful these are not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*tmo_list == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}
#endif /* INCLUDE_TIMEOUT_API */
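
/* Illustrative sketch (hypothetical names): a one-shot timeout that polls
 * until ready, then cancels itself by returning <= 0 from the callback.
 * The struct timeout must stay allocated while registered.
 *
 *   static struct timeout poll_tmo;
 *
 *   static int poll_cb(struct timeout *tmo)
 *   {
 *       if (try_poll_hardware((int)tmo->data))
 *           return 0;          // done - the timeout is cancelled
 *       return HZ/10;          // not ready - run again in 100 ms
 *   }
 *
 *   timeout_register(&poll_tmo, poll_cb, HZ/10, 0);
 */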
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
unsigned sleep(unsigned ticks)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (SLEEP_KERNEL_HOOK(ticks))
        return 0; /* Handled */

    disable_irq();
    sleep_thread(ticks);
    switch_thread();
    return 0;
}
void yield(void)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (YIELD_KERNEL_HOOK())
        return; /* handled */

    switch_thread();
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *
 * q->send->list:       >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
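
/* Illustrative sketch of the synchronous send/reply handshake described
 * above (hypothetical names):
 *
 *   Sender thread:                        Owner thread:
 *     rc = queue_send(&q, Q_CMD, d);        queue_wait(&q, &ev);  // fetches sender
 *     // blocks here until...               ...handle ev.id / ev.data...
 *     // ...the owner replies               queue_reply(&q, rc);  // wakes sender
 */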
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry * volatile * sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues that are considered owned by a thread should
 * enable this; an official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}
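
/* Illustrative setup (hypothetical names): the owning thread allocates the
 * sender list with static storage duration and registers itself as owner so
 * priority inheritance can act on blocked senders.
 *
 *   static struct event_queue my_q;
 *   static struct queue_sender_list my_q_send;
 *
 *   void my_thread_init(unsigned int my_thread_id)
 *   {
 *       queue_init(&my_q, true);
 *       queue_enable_queue_send(&my_q, &my_q_send, my_thread_id);
 *   }
 */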
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves the waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow, which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    /* What garbage is in write is irrelevant because of the masking design -
     * any other functions that empty the queue do this as well so that
     * queue_count and queue_empty return sane values in the case of a
     * concurrent change without locking inside them. */
    q->read = q->write;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        /* Add it to the all_queues array */
        *p = q;

        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    while(1)
    {
        struct thread_entry *current;

        rd = q->read;
        if (rd != q->write) /* A waking message could disappear */
            break;

        current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    q->read = rd + 1;
    rd &= QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;
    unsigned int rd, wr;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    rd = q->read;
    wr = q->write;
    if (rd == wr && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        rd = q->read;
        wr = q->write;
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (rd != wr)
    {
        q->read = rd + 1;
        rd &= QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
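
/* Illustrative event loop (hypothetical names) combining the two waits: a
 * periodic wakeup via the timeout path plus normal message handling.
 *
 *   struct queue_event ev;
 *   while(1)
 *   {
 *       queue_wait_w_tmo(&my_q, &ev, HZ);  // wake at least once a second
 *       switch(ev.id)
 *       {
 *       case SYS_TIMEOUT:
 *           do_periodic_work();
 *           break;
 *       default:
 *           handle_message(&ev);
 *           break;
 *       }
 *   }
 */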
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_post ovf q=%08lX", (long)q);

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_send ovf q=%08lX", (long)q);

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        struct queue_sender_list *sender;

        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        sender = q->send;

        /* Double-check locking */
        if(LIKELY(sender && sender->curr_sender))
            queue_release_sender(&sender->curr_sender, retval);

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;

    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    rd = q->read;
    if(rd != q->write)
    {
        *ev = q->events[rd & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
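
/* Illustrative non-blocking drain (hypothetical names) following the advice
 * above: queue_wait_w_tmo() with 0 ticks re-checks the queue under its lock,
 * so a message removed by queue_remove_from_head cannot cause an unwanted
 * block the way an unguarded queue_empty() check could.
 *
 *   struct queue_event ev;
 *   queue_wait_w_tmo(&my_q, &ev, 0);   // returns immediately
 *   if(ev.id != SYS_TIMEOUT)
 *       handle_message(&ev);
 */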
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/
static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}
/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->recursion = 0;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(mutex_get_thread(m) == NULL))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
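
/* Illustrative usage (hypothetical names): a mutex serializing access to
 * shared state between cooperating threads. Recursive locking by the owner
 * is permitted and tracked via the recursion counter.
 *
 *   static struct mutex state_mtx;
 *   mutex_init(&state_mtx);      // once, before other threads can see it
 *
 *   mutex_lock(&state_mtx);
 *   update_shared_state();
 *   mutex_unlock(&state_mtx);
 */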
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
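
/* Illustrative usage (hypothetical names): a counting semaphore limiting a
 * resource to a fixed number of concurrent users.
 *
 *   static struct semaphore slot_sem;
 *   semaphore_init(&slot_sem, 4, 4);   // 4 slots, all free initially
 *
 *   semaphore_wait(&slot_sem);         // take a slot (may block)
 *   use_resource();
 *   semaphore_release(&slot_sem);      // give it back
 */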
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = false;
    IF_COP( corelock_init(&w->cl); )
}
/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(!w->signalled))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = false; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = true;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
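
/* Illustrative usage (hypothetical names): an ISR signalling a waiting
 * thread. wakeup_signal() only takes the corelock with IRQs masked, which
 * is what makes this object IRQ-compatible.
 *
 *   static struct wakeup dma_wakeup;
 *   wakeup_init(&dma_wakeup);
 *
 *   void dma_isr(void) { wakeup_signal(&dma_wakeup); }
 *
 *   // in the waiting thread:
 *   if(wakeup_wait(&dma_wakeup, HZ/2) != OBJ_WAIT_SUCCEEDED)
 *       handle_dma_timeout();
 */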