/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#include "debug.h"
#include "general.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}
int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}
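
/* Usage sketch (illustrative only; the task and variable names below are
 * hypothetical). A tick task is called from the timer interrupt on every
 * system tick, so it must be short and must never block. */
#if 0
static volatile long example_tick_count = 0;

static void example_tick_task(void)
{
    example_tick_count++; /* keep the per-tick work minimal */
}

static void example_tick_task_start(void)
{
    tick_add_task(example_tick_task);
}

static void example_tick_task_stop(void)
{
    tick_remove_task(example_tick_task);
}
#endif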
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Not already present? */
        if(*p == NULL)
        {
            if(*tmo_list == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }
            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
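
/* Usage sketch (illustrative only; names are hypothetical). A timeout
 * callback runs from the timeout tick task: returning 0 lets it expire for
 * good, while returning a positive tick count re-arms it. */
#if 0
static struct timeout example_tmo;

static int example_timeout_cb(struct timeout *tmo)
{
    /* e.g. check a flag passed in tmo->data */
    (void)tmo;
    return 0; /* one-shot: do not reload */
}

static void example_arm_timeout(void)
{
    /* fire roughly one second from now */
    timeout_register(&example_tmo, example_timeout_cb, HZ, 0);
}
#endif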
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    long sleep_ticks = current_tick + ticks + 1;
    while (TIME_BEFORE(current_tick, sleep_ticks))
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((defined(TATUNG_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:          | XX | E1 | E2 | E3 | E4 | XX |
 * q->send->senders[]:   | NULL | T1 | T2 | NULL | T3 | NULL |
 *
 * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender: /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
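
/* Usage sketch (illustrative only) of the send/reply protocol described
 * above: one owning thread drains the queue while a client blocks in
 * queue_send until it gets a reply. The queue, event and function names are
 * hypothetical, and a real owner thread id (rather than 0) would be passed
 * to queue_enable_queue_send for priority inheritance to work. */
#if 0
static struct event_queue example_q;
static struct queue_sender_list example_q_send;
enum { EXAMPLE_EV_WORK = 1 };

static void example_owner_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true);
    queue_enable_queue_send(&example_q, &example_q_send, 0);

    while(1)
    {
        /* queue_wait auto-replies 0 to a previously fetched sender */
        queue_wait(&example_q, &ev);

        if(ev.id == EXAMPLE_EV_WORK)
            queue_reply(&example_q, 1); /* explicit reply to the sender */
    }
}

static void example_client(void)
{
    /* blocks until the owner replies (or until the default auto-reply) */
    intptr_t ok = queue_send(&example_q, EXAMPLE_EV_WORK, 0);
    (void)ok;
}
#endif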
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; however, an official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible default
 * replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(UNLIKELY(*spp))
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves waiting thread's reference from the senders array to the
 * current_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not made */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        /* Add it to the all_queues array */
        *p = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    in_send = q->send && q->send->curr_sender;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}

#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
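
/* Usage sketch (illustrative only; names are hypothetical) of the polling
 * pattern suggested in the comment above: a zero tick count makes
 * queue_wait_w_tmo return immediately with SYS_TIMEOUT when nothing is
 * queued, avoiding the queue_empty pitfall. */
#if 0
static void example_poll_queue(struct event_queue *q)
{
    struct queue_event ev;

    queue_wait_w_tmo(q, &ev, 0);

    if(ev.id != SYS_TIMEOUT)
    {
        /* handle ev.id / ev.data here */
    }
}
#endif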
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q pointer to the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);

    return p - all_queues.queues;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
    __attribute__((always_inline));
static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
    __attribute__((always_inline));
static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}
/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = false;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(LIKELY(!m->locked))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        m->locked = true;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        m->locked = false;
        corelock_unlock(&m->cl);
        return;
    }

    const int oldlevel = disable_irq_save();
    /* Transfer of owning thread is handled in the wakeup protocol
     * if priorities are enabled otherwise just set it from the
     * queue head. */
    IFN_PRIO( mutex_set_thread(m, m->queue); )
    IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
    restore_irq(oldlevel);

    corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if((result & THREAD_SWITCH) && !m->no_preempt)
        switch_thread();
#endif
}
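
/* Usage sketch (illustrative only; names are hypothetical): protecting
 * shared state with a mutex. mutex_lock blocks until the lock is free and
 * the owner may lock recursively; only the owner may call mutex_unlock. */
#if 0
static struct mutex example_mtx;
static int example_shared_value;

static void example_mutex_setup(void)
{
    mutex_init(&example_mtx);
}

static void example_mutex_update(int v)
{
    mutex_lock(&example_mtx);
    example_shared_value = v;
    mutex_unlock(&example_mtx);
}
#endif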
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}

#endif /* HAVE_SEMAPHORE_OBJECTS */
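
/* Usage sketch (illustrative only; names are hypothetical): a counting
 * semaphore handing items from a producer to a consumer, starting empty and
 * allowing up to 10 outstanding releases. */
#if 0
static struct semaphore example_sem;

static void example_sem_setup(void)
{
    semaphore_init(&example_sem, 10, 0); /* max 10, initially 0 */
}

static void example_producer(void)
{
    semaphore_release(&example_sem); /* wakes a blocked consumer if any */
}

static void example_consumer(void)
{
    semaphore_wait(&example_sem); /* blocks until a release arrives */
}
#endif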
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = false;
    IF_COP( corelock_init(&w->cl); )
}
/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(!w->signalled))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = false; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = true;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

#endif /* HAVE_WAKEUP_OBJECTS */
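
/* Usage sketch (illustrative only; names are hypothetical): a wakeup object
 * letting an interrupt handler release a waiting thread, e.g. to signal the
 * end of a transfer. */
#if 0
static struct wakeup example_wake;

static void example_wakeup_setup(void)
{
    wakeup_init(&example_wake);
}

/* called from interrupt context */
static void example_isr(void)
{
    wakeup_signal(&example_wake);
}

/* called from thread context; waits up to one second */
static bool example_wait_for_irq(void)
{
    return wakeup_wait(&example_wake, HZ) == OBJ_WAIT_SUCCEEDED;
}
#endif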