/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#ifdef SIMULATOR
#include "system-sdl.h"
#include "debug.h"
#endif
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif
/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Misc utilities
 ****************************************************************************/
/* Find a pointer in a pointer array. Returns the address of the element if
 * found or the address of the terminating NULL otherwise. */
static void ** find_array_ptr(void **arr, void *ptr)
{
    void *curr;
    for(curr = *arr; curr != NULL && curr != ptr; curr = *(++arr));
    return arr;
}
/* Remove a pointer from a pointer array if it exists. Compacts it so that
 * no gaps exist. Returns 0 on success and -1 if the element wasn't found. */
static int remove_array_ptr(void **arr, void *ptr)
{
    void *curr;
    arr = find_array_ptr(arr, ptr);

    if(*arr == NULL)
        return -1;

    /* Found. Slide up following items. */
    do
    {
        void **arr1 = arr + 1;
        *arr++ = curr = *arr1;
    }
    while(curr != NULL);

    return 0;
}
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        kernel_device_init();
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
        return -1;
    }

    restore_irq(oldlevel);
    return 0;
}
int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tmo_list;
    int rc = remove_array_ptr(arr, tmo);

    if(rc >= 0 && *arr == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*arr == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}
#endif /* INCLUDE_TIMEOUT_API */
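
/* Usage sketch (illustration only, requires INCLUDE_TIMEOUT_API): arming a
 * one-shot roughly one second from now.  "example_tmo_cb" and "example_tmo"
 * are hypothetical names.  Returning 0 from the callback lets timeout_tick()
 * cancel the timeout; returning a positive tick count re-arms it for that
 * interval. */
#if 0
static int example_tmo_cb(struct timeout *tmo)
{
    (void)tmo;  /* tmo->data carries the cookie passed at registration */
    return 0;   /* one-shot: do not reload */
}

static struct timeout example_tmo;

static void example_arm(void)
{
    timeout_register(&example_tmo, example_tmo_cb, HZ, 0);
    /* timeout_cancel(&example_tmo) would disarm it before it fires */
}
#endif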
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    long sleep_ticks = current_tick + ticks + 1;
    while (TIME_BEFORE(current_tick, sleep_ticks))
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((defined(TATUNG_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:           |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:    | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *
 * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:     /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this however an official owner is not compulsory but must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible default
 * replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves waiting thread's reference from the senders array to the
 * curr_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not made */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        if(*p == NULL)
        {
            /* Add it to the all_queues array */
            *p = q;
        }

        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    in_send = q->send && q->send->curr_sender;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to check
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);

    return p - all_queues.queues;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/
static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
    __attribute__((always_inline));
static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}
static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
    __attribute__((always_inline));
static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}
/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = false;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(LIKELY(!m->locked))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        m->locked = true;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_id_entry(THREAD_ID_CURRENT)->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        m->locked = false;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(LIKELY(--s->count >= 0))
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = thread_id_entry(THREAD_ID_CURRENT);

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
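
/* Usage sketch (illustration only, requires HAVE_SEMAPHORE_OBJECTS): a
 * binary semaphore used to signal a worker thread, created empty
 * (count 0, max 1).  All names are hypothetical. */
#if 0
static struct semaphore example_sem;

static void example_sem_setup(void)
{
    semaphore_init(&example_sem, 1, 0);  /* max 1, start with 0 available */
}

static void example_worker(void)
{
    while(1)
    {
        semaphore_wait(&example_sem);    /* block until released */
        /* ... do one unit of work ... */
    }
}

static void example_kick(void)
{
    semaphore_release(&example_sem);     /* wake the worker */
}
#endif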
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = false;
    IF_COP( corelock_init(&w->cl); )
}
/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
    {
        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(UNLIKELY(!w->signalled))
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = false; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = true;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
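
/* Usage sketch (illustration only, requires HAVE_WAKEUP_OBJECTS): a driver
 * ISR signalling a waiting thread through a wakeup object.  Names are
 * hypothetical; wakeup_signal() being usable from interrupt context is the
 * point of this object. */
#if 0
static struct wakeup example_wkup;

static void example_wkup_setup(void)
{
    wakeup_init(&example_wkup);
}

static void example_isr(void)
{
    wakeup_signal(&example_wkup);                 /* from the IRQ handler */
}

static int example_wait_for_irq(void)
{
    /* wait up to one second; returns OBJ_WAIT_SUCCEEDED when signalled */
    return wakeup_wait(&example_wkup, HZ);
}
#endif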