/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif
#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif
/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Misc utility functions
 ****************************************************************************/
/* Find a pointer in a pointer array. Returns the address of the element if
 * found or the address of the terminating NULL otherwise. */
static void ** find_array_ptr(void **arr, void *ptr)
{
    void *curr;
    for(curr = *arr; curr != NULL && curr != ptr; curr = *(++arr));
    return arr;
}
/* Remove a pointer from a pointer array if it exists. Compacts it so that
 * no gaps exist. Returns 0 on success and -1 if the element wasn't found. */
static int remove_array_ptr(void **arr, void *ptr)
{
    void *curr;
    arr = find_array_ptr(arr, ptr);

    if(*arr == NULL)
        return -1; /* Not found */

    /* Found. Slide up following items. */
    do
    {
        void **arr1 = arr + 1;
        *arr++ = curr = *arr1;
    }
    while(curr != NULL);

    return 0;
}
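
/* Both helpers rely on the arrays being NULL-terminated (hence the "+1" in
 * the declarations above): find_array_ptr() either points at the element's
 * slot or at the terminating NULL. A minimal sketch of the idiom, with
 * hypothetical names, purely for illustration: */
#if 0
static void *example_slots[4+1]; /* 4 usable slots + terminating NULL */

static void example(void *obj)
{
    /* points at obj's slot if present, else at the terminating NULL */
    void **p = find_array_ptr(example_slots, obj);

    if(p - example_slots < 4)
        *p = obj;                         /* room left - insert */

    remove_array_ptr(example_slots, obj); /* compacting removal */
}
#endif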
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
        return -1;
    }

    restore_irq(oldlevel);
    return 0;
}
int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}
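
/* Tick tasks run in interrupt context once per kernel tick until removed,
 * so they must be short and must not block. A minimal sketch of registering
 * one (the task name and counter are hypothetical, for illustration only): */
#if 0
static volatile long example_elapsed = 0;

static void example_tick_task(void)
{
    example_elapsed++; /* runs every 1/HZ seconds, in IRQ context */
}

static void example_setup(void)
{
    tick_add_task(example_tick_task);
    /* ... later, when no longer needed: */
    tick_remove_task(example_tick_task);
}
#endif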
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tmo_list;
    int rc = remove_array_ptr(arr, tmo);

    if(rc >= 0 && *arr == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*arr == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}
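
/* The callback runs in IRQ context; returning a positive tick count re-arms
 * the timeout and returning 0 lets it expire for good. A minimal sketch with
 * a hypothetical handler and object, for illustration only: */
#if 0
static struct timeout example_tmo;

static int example_timeout_cb(struct timeout *tmo)
{
    /* do something brief with tmo->data ... */
    return 0; /* one-shot: do not reload (return e.g. HZ to re-arm) */
}

static void example_arm(void)
{
    /* fire in ~1 second; resets the interval if already registered */
    timeout_register(&example_tmo, example_timeout_cb, HZ, 0);
}
#endif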
#endif /* INCLUDE_TIMEOUT_API */
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    long sleep_ticks = current_tick + ticks + 1;
    while (TIME_BEFORE(current_tick, sleep_ticks))
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((defined(TATUNG_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                                 rd                    wr
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                                 \/     \/            \/
 * q->send->list:       >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this. An official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}
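
/* A minimal sketch of enabling synchronous sends on a queue owned by one
 * thread. Names are hypothetical and the owner id is assumed to be the
 * owning thread's id as obtained at thread creation; illustration only: */
#if 0
static struct event_queue example_q;
static struct queue_sender_list example_send;
static unsigned int example_owner_id; /* hypothetical: id of owning thread */

static void example_enable_send(void)
{
    queue_init(&example_q, true);
    /* owner is what priority inheritance operates on */
    queue_enable_queue_send(&example_q, &example_send, example_owner_id);
}
#endif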
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves waiting thread's reference from the senders array to the
 * curr_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        if(*p == NULL)
        {
            /* Add it to the all_queues array */
            *p = q;
            corelock_unlock(&all_queues.cl);
        }
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
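
/* A typical consumer pattern: one thread owns the queue and blocks in
 * queue_wait() while producers call queue_post() from threads or IRQ
 * handlers. A minimal sketch; event ids and names are hypothetical: */
#if 0
enum { EXAMPLE_EV_DO_WORK = 1 };

static struct event_queue example_q;

static void example_consumer_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true); /* register so queue_broadcast() sees it */

    while(1)
    {
        queue_wait(&example_q, &ev); /* blocks while the queue is empty */

        switch(ev.id)
        {
        case EXAMPLE_EV_DO_WORK:
            /* ... use ev.data ... */
            break;
        }
    }
}

/* elsewhere, possibly from an ISR:
 *     queue_post(&example_q, EXAMPLE_EV_DO_WORK, 42); */
#endif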
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
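
/* Synchronous round trip: the sender blocks in queue_send() until the
 * owning thread answers with queue_reply() (or until the owner's next wait
 * auto-replies with 0). A minimal sketch continuing the hypothetical
 * example_q above, for illustration only: */
#if 0
enum { EXAMPLE_EV_QUERY = 2 };

static void example_client(void)
{
    /* blocks until the owning thread replies; yields the reply value */
    intptr_t answer = queue_send(&example_q, EXAMPLE_EV_QUERY, 0);
    (void)answer;
}

static void example_owner_loop(void)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait(&example_q, &ev);

        if(ev.id == EXAMPLE_EV_QUERY)
            queue_reply(&example_q, 1); /* release the blocked sender */
    }
}
#endif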
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif /* 0 */
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false; /* nothing waiting - don't bother locking */

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
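
/* As the note above suggests, when another thread may remove messages it is
 * safer to "poll" with a zero-tick wait than to test queue_empty() and then
 * block. A minimal sketch using the hypothetical example_q: */
#if 0
static void example_poll(void)
{
    struct queue_event ev;

    /* returns immediately; ev.id == SYS_TIMEOUT when nothing was queued */
    queue_wait_w_tmo(&example_q, &ev, 0);

    if(ev.id != SYS_TIMEOUT)
    {
        /* handle ev */
    }
}
#endif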
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/
static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
    __attribute__((always_inline));
static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}
static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
    __attribute__((always_inline));
static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}
/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = false;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(LIKELY(!m->locked))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        m->locked = true;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        m->locked = false;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
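
/* A minimal sketch of guarding shared state with the mutex API (names are
 * hypothetical). Locking by the current owner is counted, so each lock
 * needs a matching unlock: */
#if 0
static struct mutex example_mtx; /* mutex_init() once before first use */
static int example_shared;

static void example_mutex_user(void)
{
    mutex_lock(&example_mtx);
    example_shared++;          /* critical section */
    mutex_unlock(&example_mtx);
}
#endif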
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
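
/* A minimal sketch of a counting semaphore used as a producer/consumer
 * signal (assumes HAVE_SEMAPHORE_OBJECTS; names are hypothetical): */
#if 0
static struct semaphore example_sem;

static void example_sem_setup(void)
{
    semaphore_init(&example_sem, 10, 0); /* max 10 pending, start empty */
}

static void example_producer(void) { semaphore_release(&example_sem); }
static void example_consumer(void) { semaphore_wait(&example_sem); }
#endif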
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = false;
    IF_COP( corelock_init(&w->cl); )
}
/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(!w->signalled))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = false; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = true;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
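
/* A minimal sketch of the wakeup object: an ISR signals, a thread waits.
 * Assumes HAVE_WAKEUP_OBJECTS; names are hypothetical, for illustration: */
#if 0
static struct wakeup example_wake; /* wakeup_init() once before first use */

static void example_isr(void)
{
    wakeup_signal(&example_wake); /* IRQ-safe; latches if nobody waits */
}

static void example_wait_thread(void)
{
    /* wait up to one second; distinguishes success from timeout */
    if(wakeup_wait(&example_wake, HZ) == OBJ_WAIT_SUCCEEDED)
    {
        /* signalled */
    }
}
#endif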