/***************************************************************************
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
    defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif

#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    struct event_queue *queues[MAX_NUM_QUEUES+1];
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock cl;
#endif
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
        return -1;
    }

    restore_irq(oldlevel);
    return 0;
}
int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}
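/* Example (editor's illustrative sketch, not part of the original file):
 * registering a tick task.  The names below are hypothetical; tick tasks
 * run from the timer interrupt once per kernel tick, so they must be short
 * and must never block. */
#if 0
static volatile unsigned int poll_counter;

static void poll_tick(void)
{
    poll_counter++;                 /* runs in interrupt context every tick */
}

static void poll_start(void)
{
    tick_add_task(poll_tick);       /* panics if the task table is full */
}

static void poll_stop(void)
{
    tick_remove_task(poll_tick);    /* remove when no longer needed */
}
#endif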
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        if(*tmo_list == NULL)
        {
            tick_add_task(timeout_tick); /* First one - add task */
        }

        *p = tmo;
        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}
#endif /* INCLUDE_TIMEOUT_API */
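/* Example (editor's illustrative sketch, not part of the original file):
 * arming a one-shot with timeout_register.  'led_off_cb' and 'led_tmo' are
 * hypothetical names.  A positive return value from the callback re-arms it
 * relative to expiry; zero (or negative) cancels the timeout. */
#if 0
static struct timeout led_tmo;

static int led_off_cb(struct timeout *tmo)
{
    (void)tmo;              /* tmo->data carries the registered intptr_t */
    /* turn the (hypothetical) LED off here */
    return 0;               /* do not reload - one-shot behaviour */
}

static void led_flash(void)
{
    /* fire led_off_cb in HZ/2 ticks; re-registering resets the interval */
    timeout_register(&led_tmo, led_off_cb, HZ/2, 0);
}
#endif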
/****************************************************************************
 ****************************************************************************/
unsigned sleep(unsigned ticks)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (SLEEP_KERNEL_HOOK(ticks))
        return 0; /* Handled */

    disable_irq();
    sleep_thread(ticks);
    switch_thread();
    return 0;
}

void yield(void)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (YIELD_KERNEL_HOOK())
        return; /* handled */

    switch_thread();
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:          | XX | E1 | E2 | E3 | E4 | XX |
 * q->send->senders[]:   | NULL | T1 | T2 | NULL | T3 | NULL |
 * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling which makes it
 * more efficient to reject the majority of cases that don't need this
 * called. */
static void queue_release_sender(struct thread_entry * volatile * sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory but must be specified for
 * priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies or possible
 * default replies if two or more waits happen before a reply is done.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
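/* Example (editor's illustrative sketch, not part of the original file):
 * the synchronous-send pattern.  The queue, sender list, command id and
 * thread function names below are hypothetical, and thread_self() is
 * assumed to return the owning thread's id. */
#if 0
#define MY_CMD 1

static struct event_queue my_q;
static struct queue_sender_list my_send;

static void my_thread(void)
{
    struct queue_event ev;

    queue_init(&my_q, true);
    queue_enable_queue_send(&my_q, &my_send, thread_self());

    while(1)
    {
        queue_wait(&my_q, &ev);    /* also auto-replies to the previous sender */

        switch(ev.id)
        {
        case MY_CMD:
            queue_reply(&my_q, 1); /* unblocks the queue_send caller below */
            break;
        }
    }
}

/* Client side: blocks until my_thread replies (or a default 0 on overflow) */
static intptr_t my_call(void)
{
    return queue_send(&my_q, MY_CMD, 0);
}
#endif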
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves waiting thread's reference from the senders array to the
 * curr_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    /* What garbage is in write is irrelevant because of the masking design-
     * any other functions that empty the queue do this as well so that
     * queue_count and queue_empty return sane values in the case of a
     * concurrent change without locking inside them. */
    q->read = q->write;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        /* Add it to the all_queues array */
        *p = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_self_entry(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
    queue_do_auto_reply(q->send);
#endif

    while(1)
    {
        struct thread_entry *current;

        rd = q->read;
        if (rd != q->write) /* A waking message could disappear */
            break;

        current = thread_self_entry();

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        disable_irq();
        corelock_lock(&q->cl);
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(ev)
#endif
    {
        q->read = rd + 1;
        rd &= QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    /* else just waiting on non-empty */

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;
    unsigned int rd, wr;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_self_entry(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
    queue_do_auto_reply(q->send);
#endif

    rd = q->read;
    wr = q->write;
    if (rd == wr && ticks > 0)
    {
        struct thread_entry *current = thread_self_entry();

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        disable_irq();
        corelock_lock(&q->cl);

        rd = q->read;
        wr = q->write;
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(ev)
#endif
    {
        /* no worry about a removed message here - status is checked inside
           locks - perhaps verify if timeout or false alarm */
        if (rd != wr)
        {
            q->read = rd + 1;
            rd &= QUEUE_LENGTH_MASK;
            *ev = q->events[rd];

            /* Get data for a waiting thread if one */
            queue_do_fetch_sender(q->send, rd);
        }
        else
        {
            ev->id = SYS_TIMEOUT;
        }
    }
    /* else just waiting on non-empty */

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
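/* Example (editor's illustrative sketch, not part of the original file):
 * the common consumer loop with a timeout.  SYS_TIMEOUT is delivered in
 * ev.id when no message arrived within the given tick count. */
#if 0
static void consumer_loop(struct event_queue *q)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait_w_tmo(q, &ev, HZ);   /* wait at most one second */

        if(ev.id == SYS_TIMEOUT)
        {
            /* periodic housekeeping - no message arrived */
            continue;
        }

        /* handle ev.id / ev.data here */
    }
}
#endif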
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_post ovf q=%08lX", (long)q);

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_send ovf q=%08lX", (long)q);

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_self_entry();

        if(UNLIKELY(*spp != NULL))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    in_send = q->send && q->send->curr_sender;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return in_send;
}
#endif /* 0 */
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        struct queue_sender_list *sender;

        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        sender = q->send;

        /* Double-check locking */
        if(LIKELY(sender && sender->curr_sender))
            queue_release_sender(&sender->curr_sender, retval);

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Scan the event queue from head to tail, returning any event from the
   filter list that was found, optionally removing the event. If an
   event is returned, synchronous events are handled in the same manner as
   with queue_wait(_w_tmo); if discarded, then as queue_clear.
   If filters are NULL, any event matches. If filters exist, the default
   is to search the full queue depth.
   Earlier filters take precedence.

   Return true if an event was found, false otherwise. */
bool queue_peek_ex(struct event_queue *q, struct queue_event *ev,
                   unsigned int flags, const long (*filters)[2])
{
    bool have_msg;
    unsigned int rd, wr;
    int oldlevel;

    if(LIKELY(q->read == q->write))
        return false; /* Empty: do nothing further */

    have_msg = false;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Starting at the head, find first match */
    for(rd = q->read, wr = q->write; rd != wr; rd++)
    {
        struct queue_event *e = &q->events[rd & QUEUE_LENGTH_MASK];

        if(filters)
        {
            /* Have filters - find the first thing that passes */
            const long (* f)[2] = filters;
            const long (* const f_last)[2] =
                &filters[flags & QPEEK_FILTER_COUNT_MASK];
            long id = e->id;

            do
            {
                if(UNLIKELY(id >= (*f)[0] && id <= (*f)[1]))
                    goto passed_filter;
            }
            while(++f <= f_last);

            if(LIKELY(!(flags & QPEEK_FILTER_HEAD_ONLY)))
                continue;   /* No match; test next event */

            break;          /* Only check the head */
        }
        /* else - anything passes */

    passed_filter:

        /* Found a matching event */
        have_msg = true;

        if(ev)
            *ev = *e;       /* Caller wants the event */

        if(flags & QPEEK_REMOVE_EVENTS)
        {
            /* Do event removal */
            unsigned int r = q->read;
            q->read = r + 1; /* Advance head */

            if(ev)
            {
                queue_do_auto_reply(q->send);
                /* Get the thread waiting for reply, if any */
                queue_do_fetch_sender(q->send, rd & QUEUE_LENGTH_MASK);
            }
            else
            {
                /* Release any thread waiting on this message */
                queue_do_unblock_sender(q->send, rd & QUEUE_LENGTH_MASK);
            }

            /* Slide messages forward into the gap if not at the head */
            while(rd != r)
            {
                unsigned int dst = rd & QUEUE_LENGTH_MASK;
                unsigned int src = --rd & QUEUE_LENGTH_MASK;

                q->events[dst] = q->events[src];

                /* Keep sender wait list in sync */
                if(q->send)
                    q->send->senders[dst] = q->send->senders[src];
            }
        }

        break;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    return queue_peek_ex(q, ev, 0, NULL);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    const long f[2] = { id, id };
    while (queue_peek_ex(q, NULL,
                         QPEEK_FILTER_HEAD_ONLY | QPEEK_REMOVE_EVENTS, &f));
}
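/* Example (editor's illustrative sketch, not part of the original file):
 * peeking for a single range of ids without disturbing other events.  The
 * MY_FIRST/MY_LAST ids are hypothetical; with one filter pair and no count
 * bits set in 'flags', only filters[0] is tested. */
#if 0
#define MY_FIRST 0x100
#define MY_LAST  0x10f

static bool grab_my_event(struct event_queue *q, struct queue_event *ev)
{
    static const long filter[1][2] = { { MY_FIRST, MY_LAST } };

    /* Return the first queued event whose id lies in the range and
       dequeue it, handling any synchronous sender as queue_wait would. */
    return queue_peek_ex(q, ev, QPEEK_REMOVE_EVENTS, filter);
}
#endif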
#else /* !HAVE_EXTENDED_MESSAGING_AND_NAME */
/* The more powerful routines aren't required */
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;

    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    rd = q->read;
    if(rd != q->write)
    {
        *ev = q->events[rd & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
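/* Example (editor's illustrative sketch, not part of the original file):
 * the non-blocking poll suggested above - a zero-tick queue_wait_w_tmo
 * checks and dequeues in one locked step, avoiding the race described for
 * queue_empty() when another caller may remove messages. */
#if 0
static bool try_get_event(struct event_queue *q, struct queue_event *ev)
{
    queue_wait_w_tmo(q, ev, 0);     /* 0 ticks: never blocks */
    return ev->id != SYS_TIMEOUT;
}
#endif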
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    /* Mark the queue as empty */
    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);

    return p - all_queues.queues;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/
static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}
/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->recursion = 0;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_self_entry();

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(mutex_get_thread(m) == NULL))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_self_entry()->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
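/* Example (editor's illustrative sketch, not part of the original file):
 * guarding shared state with a mutex.  'lcd_mtx' and 'frame_count' are
 * hypothetical; mutex_lock may block and so must not be used from an ISR. */
#if 0
static struct mutex lcd_mtx;
static int frame_count;

static void display_init(void)
{
    mutex_init(&lcd_mtx);       /* once, before other threads can see it */
}

static void display_update(void)
{
    mutex_lock(&lcd_mtx);       /* recursive locking by the owner is allowed */
    frame_count++;
    mutex_unlock(&lcd_mtx);     /* only the owning thread may unlock */
}
#endif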
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
/* Initialize the semaphore object.
 * max = maximum up count the semaphore may assume (max >= 1)
 * start = initial count of semaphore (0 <= count <= max) */
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret;
    int oldlevel;
    int count;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    count = s->count;

    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout == 0)
    {
        /* just polling it */
        ret = OBJ_WAIT_TIMEDOUT;
    }
    else
    {
        /* too many waits - block until count is upped... */
        struct thread_entry * current = thread_self_entry();

        IF_COP( current->obj_cl = &s->cl; )
        current->bqp = &s->queue;
        /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
         * explicit in semaphore_release */
        current->retval = OBJ_WAIT_TIMEDOUT;

        if(timeout > 0)
            block_thread_w_tmo(current, timeout); /* ...or timed out... */
        else
            block_thread(current);                /* -timeout = infinite */

        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        return current->retval;
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Up the semaphore's count and release any thread waiting at the head of the
 * queue. The count is saturated to the value of the 'max' parameter specified
 * in 'semaphore_init'. */
void semaphore_release(struct semaphore *s)
{
#if defined(HAVE_PRIORITY_SCHEDULING) && defined(irq_enabled_checkval)
    unsigned int result = THREAD_NONE;
#endif
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    if(LIKELY(s->queue != NULL))
    {
        /* a thread was queued - wake it up and keep count at 0 */
        KERNEL_ASSERT(s->count == 0,
            "semaphore_release->threads queued but count=%d!\n", s->count);
        s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
#if defined(HAVE_PRIORITY_SCHEDULING) && defined(irq_enabled_checkval)
        result = wakeup_thread(&s->queue);
#else
        wakeup_thread(&s->queue);
#endif
    }
    else
    {
        int count = s->count;
        if(count < s->max)
        {
            /* nothing waiting - up it */
            s->count = count + 1;
        }
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

#if defined(HAVE_PRIORITY_SCHEDULING) && defined(irq_enabled_checkval)
    /* No thread switch if IRQ disabled - it's probably called via ISR.
     * switch_thread would re-enable them anyway. */
    if((result & THREAD_SWITCH) && irq_enabled_checkval(oldlevel))
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
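/* Example (editor's illustrative sketch, not part of the original file):
 * a counting semaphore handing work from an ISR to a thread.  The names
 * are hypothetical, and TIMEOUT_BLOCK is assumed to be the negative
 * "wait forever" timeout value referred to in semaphore_wait above. */
#if 0
static struct semaphore dma_sem;

static void dma_setup(void)
{
    semaphore_init(&dma_sem, 8, 0);     /* max count 8, initially 0 */
}

static void dma_isr(void)
{
    semaphore_release(&dma_sem);        /* ISR-safe; count saturates at max */
}

static void dma_worker(void)
{
    while(1)
    {
        if(semaphore_wait(&dma_sem, TIMEOUT_BLOCK) == OBJ_WAIT_SUCCEEDED)
        {
            /* process one completed transfer */
        }
    }
}
#endif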