/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
    }
}

/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}

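/* Illustrative sketch (not part of the kernel): how a driver might hook the
 * tick with the API above. The "example_poll" name and counter are invented
 * for the example. A tick task runs in interrupt context every tick, so it
 * must be short and must not block. Following this file's convention for
 * inactive code, the sketch is wrapped in #if 0. */
#if 0
static volatile long example_ticks_elapsed = 0;

static void example_poll(void)
{
    /* called once per tick from the timer interrupt */
    example_ticks_elapsed++;
}

static void example_install(void)
{
    tick_add_task(example_poll);     /* start getting called every tick */
    /* ... later ... */
    tick_remove_task(example_poll);  /* stop */
}
#endif
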
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        if(*tmo_list == NULL)
        {
            tick_add_task(timeout_tick); /* First one - add task */
        }

        *p = tmo;
        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}
#endif /* INCLUDE_TIMEOUT_API */

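/* Illustrative sketch (not part of the kernel): one way the timeout API
 * above might be used, assuming INCLUDE_TIMEOUT_API. The "example_flush"
 * callback and the HZ-based intervals are invented for the example. A
 * callback returns the number of ticks until it should run again, or 0 to
 * cancel itself. Wrapped in #if 0, following this file's convention. */
#if 0
static struct timeout example_tmo;

static int example_flush(struct timeout *tmo)
{
    /* do some short, ISR-safe work here */
    (void)tmo;
    return HZ/2;    /* run again in half a second; return 0 to stop */
}

static void example_start(void)
{
    /* (re)arm the timer: first run after HZ ticks, data unused here */
    timeout_register(&example_tmo, example_flush, HZ, 0);
    /* timeout_cancel(&example_tmo) would stop it from software */
}
#endif
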
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
unsigned sleep(unsigned ticks)
{
#if defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    long sleep_ticks = current_tick + ticks + 1;
    while (TIME_BEFORE(current_tick, sleep_ticks))
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
    return 0;
}

void yield(void)
{
#if ((defined(TATUNG_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:          | XX | E1 | E2 | E3 | E4 | XX |
 * q->send->senders[]:   | NULL | T1 | T2 | NULL | T3 | NULL |
 * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:     /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * done.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}

/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}

/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        queue_release_sender(&send->curr_sender, 0);
    }
}

/* Moves waiting thread's reference from the senders array to the
 * current_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        /* Add it to the all_queues array */
        *p = q;

        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

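/* Illustrative sketch (not part of the kernel): a minimal event loop built
 * on the queue API above. The queue, thread function and event ID are
 * invented for the example; real code would use its own message IDs and
 * also handle SYS_* events. Wrapped in #if 0, as this file does for
 * inactive code. */
#if 0
static struct event_queue example_q;

static void example_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true); /* register so it receives broadcasts */

    while(1)
    {
        queue_wait(&example_q, &ev);

        switch(ev.id)
        {
        case 1: /* hypothetical "work" message */
            /* ev.data carries the intptr_t passed to queue_post */
            break;
        default:
            break;
        }
    }
}

/* elsewhere, possibly from another thread:
 *     queue_post(&example_q, 1, (intptr_t)some_data); */
#endif
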
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp != NULL))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif /* 0 */

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

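/* Illustrative sketch (not part of the kernel): synchronous messaging with
 * queue_send/queue_reply, assuming HAVE_EXTENDED_MESSAGING_AND_NAME. All
 * names are invented for the example. The owning thread enables sending
 * once, then answers each dequeued synchronous message; a sender blocks in
 * queue_send until that reply (or the auto-reply on the next wait) arrives.
 * Wrapped in #if 0, following this file's convention. */
#if 0
static struct event_queue srv_q;
static struct queue_sender_list srv_send;

static void server_thread(void)
{
    struct queue_event ev;

    queue_init(&srv_q, true);
    /* 0 = no official owner; pass a thread id for priority inheritance */
    queue_enable_queue_send(&srv_q, &srv_send, 0);

    while(1)
    {
        queue_wait(&srv_q, &ev);
        /* handle ev.id / ev.data, then answer the sender (if any) */
        queue_reply(&srv_q, 1 /* status returned to queue_send */);
    }
}

/* client side - blocks until server_thread calls queue_reply:
 *     intptr_t status = queue_send(&srv_q, 42, (intptr_t)arg); */
#endif
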
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/
static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
    __attribute__((always_inline));
static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
    __attribute__((always_inline));
static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = false;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(LIKELY(!m->locked))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        m->locked = true;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        m->locked = false;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}

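/* Illustrative sketch (not part of the kernel): typical mutex usage guarding
 * shared data. The mutex and counter names are invented for the example;
 * only the owning thread may call mutex_unlock. Wrapped in #if 0, following
 * this file's convention for inactive code. */
#if 0
static struct mutex example_mtx;
static int example_shared_counter;

static void example_setup(void)
{
    mutex_init(&example_mtx);
}

static void example_bump(void)
{
    mutex_lock(&example_mtx);     /* blocks until the lock is free */
    example_shared_counter++;     /* critical section */
    mutex_unlock(&example_mtx);
}
#endif
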
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */

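/* Illustrative sketch (not part of the kernel): a counting semaphore used to
 * hand items from a producer to a consumer, assuming HAVE_SEMAPHORE_OBJECTS.
 * Names and the depth of 4 are invented for the example. Wrapped in #if 0,
 * following this file's convention. */
#if 0
static struct semaphore example_sem;

static void example_init(void)
{
    /* at most 4 outstanding items, none available initially */
    semaphore_init(&example_sem, 4, 0);
}

static void example_producer(void)
{
    /* ... produce an item ... */
    semaphore_release(&example_sem);  /* signal one waiting consumer */
}

static void example_consumer(void)
{
    semaphore_wait(&example_sem);     /* blocks while the count is exhausted */
    /* ... consume the item ... */
}
#endif
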
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = false;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(!w->signalled))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = false; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = true;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */

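/* Illustrative sketch (not part of the kernel): using the wakeup object to
 * let an ISR release a waiting thread, assuming HAVE_WAKEUP_OBJECTS. Names
 * and the HZ-based timeout are invented for the example; wakeup_init() must
 * have been called on the object first. Wrapped in #if 0, following this
 * file's convention. */
#if 0
static struct wakeup example_wake;  /* somewhere at init: wakeup_init(&example_wake); */

static void example_isr(void)
{
    wakeup_signal(&example_wake);   /* IRQ-safe: set the flag and wake any waiter */
}

static int example_wait_for_irq(void)
{
    /* returns OBJ_WAIT_SUCCEEDED or OBJ_WAIT_TIMEDOUT */
    return wakeup_wait(&example_wake, HZ/10);
}
#endif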