/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#ifdef SIMULATOR
#include "system-sdl.h"
#endif
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Common utilities
 ****************************************************************************/
/* Find a pointer in a pointer array. Returns the address of the element if
 * found or the address of the terminating NULL otherwise. */
static void ** find_array_ptr(void **arr, void *ptr)
{
    void *curr;
    for(curr = *arr; curr != NULL && curr != ptr; curr = *(++arr));
    return arr;
}
/* Remove a pointer from a pointer array if it exists. Compacts it so that
 * no gaps exist. Returns 0 on success and -1 if the element wasn't found. */
static int remove_array_ptr(void **arr, void *ptr)
{
    void *curr;
    arr = find_array_ptr(arr, ptr);

    if(*arr == NULL)
        return -1;

    /* Found. Slide up following items. */
    do
    {
        void **arr1 = arr + 1;
        *arr++ = curr = *arr1;
    }
    while(curr != NULL);

    return 0;
}
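
/* A minimal usage sketch of the two helpers above (illustrative only, not
 * part of the kernel build - the array and names here are hypothetical): */
#if 0
static void *example_arr[4+1]; /* +1 keeps a terminating NULL */

static void example(void *item)
{
    void **p = find_array_ptr(example_arr, item);
    if(p - (void **)example_arr < 4)
        *p = item;                       /* append (or no-op if present) */
    remove_array_ptr(example_arr, item); /* compacting removal */
}
#endif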
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}
int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}
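
/* Illustrative only - a sketch of registering a function to run once per
 * kernel tick (the handler and caller names below are hypothetical): */
#if 0
static void my_tick_handler(void)
{
    /* Runs in interrupt context on every tick - keep it short */
}

static void example_tick(void)
{
    tick_add_task(my_tick_handler);
    /* ... */
    tick_remove_task(my_tick_handler);
}
#endif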
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tmo_list;
    int rc = remove_array_ptr(arr, tmo);

    if(rc >= 0 && *arr == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*arr == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}
#endif /* INCLUDE_TIMEOUT_API */
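
/* Illustrative only - a sketch of the timeout API above; the callback name
 * and struct instance are hypothetical. The callback's return value is the
 * reload interval in ticks, or <= 0 to cancel: */
#if 0
static struct timeout example_tmo;

static int example_timeout_cb(struct timeout *tmo)
{
    /* fires once, one second after registration */
    (void)tmo;
    return 0; /* do not reload */
}

static void example_timeout(void)
{
    timeout_register(&example_tmo, example_timeout_cb, HZ, 0);
}
#endif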
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    long sleep_ticks = current_tick + ticks + 1;
    while (sleep_ticks > current_tick)
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:          | XX | E1 | E2 | E3 | E4 | XX |
 * q->send->senders[]:   | NULL | T1 | T2 | NULL | T3 | NULL |
 *                         \/     \/           \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:   /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
    thread->wakeup_ext_cb = NULL;
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory but must be specified
 * for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves waiting thread's reference from the senders array to the
 * current_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        /* Add it to the all_queues array */
        *p = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == cores[CURRENT_CORE].running,
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == cores[CURRENT_CORE].running,
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
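
/* Illustrative only - the canonical consumer loop for an event queue; the
 * queue instance and event handling here are hypothetical: */
#if 0
static struct event_queue example_q;

static void example_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true);

    while(1)
    {
        queue_wait_w_tmo(&example_q, &ev, HZ);

        switch(ev.id)
        {
        case SYS_TIMEOUT:
            /* nothing arrived within one second */
            break;
        default:
            /* handle ev.id / ev.data */
            break;
        }
    }
}
#endif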
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = cores[CURRENT_CORE].running;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
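
/* Illustrative only - how the synchronous send/reply pair is typically used;
 * the queue, message ID and values here are hypothetical. The sender blocks
 * until the consumer calls queue_reply (or a default reply is issued): */
#if 0
/* Sender side: */
intptr_t result = queue_send(&example_q, EXAMPLE_MSG_ID, 0);

/* Consumer side, after queue_wait has dequeued EXAMPLE_MSG_ID: */
queue_reply(&example_q, 1); /* wakes the sender; it sees result == 1 */
#endif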
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q The event_queue to query
 * @return The number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = 0;
    MUTEX_SET_THREAD(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(current == MUTEX_GET_THREAD(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(LIKELY(m->locked == 0))
    {
        /* lock is open */
        MUTEX_SET_THREAD(m, current);
        m->locked = 1;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == cores[CURRENT_CORE].running,
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  cores[CURRENT_CORE].running->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        MUTEX_SET_THREAD(m, NULL);
        m->locked = 0;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
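
/* Illustrative only - basic mutex usage protecting shared state; the mutex
 * instance and the critical section contents are hypothetical: */
#if 0
static struct mutex example_mtx;

static void example_mutex(void)
{
    mutex_init(&example_mtx); /* once, before any contention */

    mutex_lock(&example_mtx);
    /* ... touch shared state ... */
    mutex_unlock(&example_mtx);
}
#endif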
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
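
/* Illustrative only - a semaphore counting items in a hypothetical
 * producer/consumer pair (starts empty, at most 8 outstanding items): */
#if 0
static struct semaphore example_sem;

static void example_semaphore(void)
{
    semaphore_init(&example_sem, 8, 0);

    /* producer: */
    semaphore_release(&example_sem); /* signal one item */

    /* consumer: */
    semaphore_wait(&example_sem);    /* block until an item exists */
}
#endif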
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = 0;
    IF_COP( corelock_init(&w->cl); )
}
/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(w->signalled == 0 && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(w->signalled == 0))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = 0; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = 1;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
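
/* Illustrative only - typical wakeup-object use between an ISR and a thread;
 * the object name and the one-second timeout are hypothetical: */
#if 0
static struct wakeup example_wk;

/* In the ISR: */
void example_isr(void)
{
    wakeup_signal(&example_wk); /* IRQ-safe */
}

/* In the waiting thread: */
void example_wait(void)
{
    wakeup_init(&example_wk);

    if(wakeup_wait(&example_wk, HZ) == OBJ_WAIT_SUCCEEDED)
    {
        /* signalled within one second */
    }
}
#endif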