/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "system.h"
#include "panic.h"
#ifdef SIMULATOR
#include "system-sdl.h"
#endif
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}
/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;     /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23;   /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TIER0 = 0xf9;  /* Enable GRA match interrupt */

    TSTR |= 0x01;  /* Start timer 0 */
}

void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;
}
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0;                           /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
                                        /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff;  /* Clear all events */

    ICR1 = 0x8c;  /* Interrupt on level 3.0 */
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TER0 = 0xff;  /* Clear all events */
}
#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using the main CPU core -
       wake up the COP through its control interface to provide pulse */
    for (i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if (tick_funcs[i])
            tick_funcs[i]();
    }

#if NUM_CORES > 1
    /* Pulse the COP */
    core_wake(COP);
#endif /* NUM_CORES */

    current_tick++;
}
#endif

/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    /* enable the timer */
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}
#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TIMER0.clr = 0;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}
#endif
int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Add a task if there is room */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Remove a task if it is there */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    return -1;
}
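
/* Usage sketch (illustrative only): registering a periodic tick task with
 * the API above. The names "example_counter", "example_start" and
 * "example_stop" are hypothetical. Tick tasks run in interrupt context once
 * per kernel tick, so they must be short and must not block. */
#if 0
static volatile long example_counter;

static void example_tick(void)
{
    example_counter++;              /* called once per tick from the timer ISR */
}

static void example_start(void)
{
    tick_add_task(example_tick);    /* begin receiving tick callbacks */
}

static void example_stop(void)
{
    tick_remove_task(example_tick); /* stop receiving tick callbacks */
}
#endif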
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list - unlink it */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    restore_irq(oldlevel);
}
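
/* Usage sketch (illustrative only): a timeout that re-arms itself a fixed
 * number of times. "example_timeout", "example_to_cb" and "example_arm" are
 * hypothetical. The callback runs from the tick ISR; returning non-zero
 * reloads the timeout, returning zero cancels it (see timeout_tick above). */
#if 0
static struct timeout example_timeout;

static int example_to_cb(struct timeout *tmo)
{
    /* tmo->data carries the value passed to timeout_register() and may be
       altered by the handler */
    return (--tmo->data > 0) ? 1 : 0; /* reload until the count runs out */
}

static void example_arm(void)
{
    /* fire roughly every HZ/2 ticks, five times in total */
    timeout_register(&example_timeout, example_to_cb, HZ/2, 5);
}
#endif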
#endif /* INCLUDE_TIMEOUT_API */

/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21;    // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20);  // start timer 4
    do {
        counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    long sleep_ticks = current_tick + ticks + 1;
    while (sleep_ticks > current_tick)
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:          | XX | E1 | E2 | E3 | E4 | XX |
 * q->send->senders[]:   | NULL | T1 | T2 | NULL | T3 | NULL |
 * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:  /\
 *
 * Thread has E0 in its own struct queue_event.
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
                queue_release_sender(spp, 0);
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
    thread->wakeup_ext_cb = NULL;
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             struct thread_entry *owner)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        send->blocker.thread = owner;
        if(owner != NULL)
            q->blocker_p = &send->blocker;
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner;
}
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
            queue_release_sender(spp, 0);
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves the waiting thread's reference from the senders array to
 * curr_sender, which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow, which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0; i < all_queues.count; i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(; i < all_queues.count; i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = cores[CURRENT_CORE].running;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(q->send && q->send->curr_sender) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue *q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue *q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q The event_queue to query
 * @return The number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    int i;

    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);

    for(i = 0; i < all_queues.count; i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);

    return i;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = 0;
    MUTEX_SET_THREAD(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(current == MUTEX_GET_THREAD(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(m->locked == 0)
    {
        /* lock is open */
        MUTEX_SET_THREAD(m, current);
        m->locked = 1;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only the owning thread may call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  thread_get_current()->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(m->queue == NULL)
    {
        /* no threads waiting - open the lock */
        MUTEX_SET_THREAD(m, NULL);
        m->locked = 0;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of the owning thread is handled in the wakeup protocol
         * if priorities are enabled, otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
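
/* Usage sketch (illustrative only): protecting shared state with a mutex.
 * The names are hypothetical. mutex_lock() may block, so it must not be
 * called from interrupt context. */
#if 0
static struct mutex example_mtx;   /* mutex_init(&example_mtx) once at startup */
static int example_shared_value;

static void example_update(int v)
{
    mutex_lock(&example_mtx);      /* blocks until ownership is gained */
    example_shared_value = v;
    mutex_unlock(&example_mtx);    /* wakes the next waiter, if any */
}
#endif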
/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(l->thread == current)
    {
        /* current core already owns it */
        l->count++;
        return;
    }

    /* lock against other processor cores */
    corelock_lock(&l->cl);

    /* take ownership */
    l->thread = current;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == thread_get_current(),
                  "spinlock_unlock->wrong thread\n");

    if(l->count > 0)
    {
        /* this core still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    /* release lock */
    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
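
/* Usage sketch (illustrative only): a spinlock guarding a short critical
 * section against the other core on multicore targets. The names are
 * hypothetical. */
#if 0
#if NUM_CORES > 1
static struct spinlock example_sl;  /* spinlock_init(&example_sl) at startup */
static int example_cross_core_flag;

static void example_set_flag(int v)
{
    spinlock_lock(&example_sl);     /* spins against the other core */
    example_cross_core_flag = v;
    spinlock_unlock(&example_sl);
}
#endif
#endif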
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");

    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}
void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(--s->count >= 0)
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
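
/* Usage sketch (illustrative only): a counting semaphore as a handshake
 * between a producer (for example an interrupt handler reporting completed
 * transfers) and a consumer thread. The names are hypothetical. */
#if 0
static struct semaphore example_sem; /* semaphore_init(&example_sem, 8, 0) */

static void example_producer(void)
{
    semaphore_release(&example_sem); /* one more item available */
}

static void example_worker_thread(void)
{
    while (1)
    {
        semaphore_wait(&example_sem); /* blocks while the count is zero */
        /* consume one item */
    }
}
#endif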
/****************************************************************************
 * Simple event functions ;)
 ****************************************************************************/
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
    corelock_init(&e->cl);
}
void event_wait(struct event *e, unsigned int for_state)
{
    struct thread_entry *current;

    corelock_lock(&e->cl);

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(e->state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            corelock_unlock(&e->cl);
            return;
        }
        /* block until state matches */
    }
    else if(for_state == e->state)
    {
        /* the state being waited for is the current state */
        corelock_unlock(&e->cl);
        return;
    }

    /* block until state matches what the caller requests */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &e->cl; )
    current->bqp = &e->queues[for_state];

    disable_irq();
    block_thread(current);

    corelock_unlock(&e->cl);

    /* turn control over to next thread */
    switch_thread();
}
void event_set_state(struct event *e, unsigned int state)
{
    unsigned int result;
    int oldlevel;

    corelock_lock(&e->cl);

    if(e->state == state)
    {
        /* no change */
        corelock_unlock(&e->cl);
        return;
    }

    IF_PRIO( result = THREAD_OK; )

    oldlevel = disable_irq_save();

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            /* no thread should have ever blocked for nonsignaled */
            KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
                          "set_event_state->queue[NS]:S\n");
            /* pass to next thread and keep unsignaled - "pulse" */
            result = wakeup_thread(&e->queues[STATE_SIGNALED]);
            e->state = (result & THREAD_OK) ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            e->state = STATE_SIGNALED;
            thread_queue_wake(&e->queues[STATE_SIGNALED]);
        }
    }
    else
    {
        /* release all threads waiting for nonsignaled */

        /* no thread should have ever blocked if automatic */
        KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
                      e->automatic == 0, "set_event_state->queue[NS]:NS\n");

        e->state = STATE_NONSIGNALED;
        thread_queue_wake(&e->queues[STATE_NONSIGNALED]);
    }

    restore_irq(oldlevel);

    corelock_unlock(&e->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_EVENT_OBJECTS */
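
/* Usage sketch (illustrative only): an automatic event used to hand single
 * wakeups ("pulses") to a waiting thread. The names are hypothetical. */
#if 0
static struct event example_ev; /* event_init(&example_ev, EVENT_AUTOMATIC) */

static void example_waiter_thread(void)
{
    while (1)
    {
        /* blocks until another thread signals; an automatic event goes back
           to nonsignaled as soon as one waiter is released */
        event_wait(&example_ev, STATE_SIGNALED);
        /* handle the wakeup */
    }
}

static void example_notify(void)
{
    event_set_state(&example_ev, STATE_SIGNALED);
}
#endif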
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 */

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = 0;
    IF_COP( corelock_init(&w->cl); )
}
/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(w->signalled == 0 && timeout != TIMEOUT_NOBLOCK)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(w->signalled == 0)
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = 0; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
/* Signal the waiting thread, or leave the signal latched if the thread
 * hasn't waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = 1;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
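
/* Usage sketch (illustrative only): a wakeup object used by a driver to
 * wait for an interrupt with a timeout. The names are hypothetical. */
#if 0
static struct wakeup example_wkup; /* wakeup_init(&example_wkup) at startup */

static void example_transfer_isr(void)
{
    wakeup_signal(&example_wkup);  /* wake the waiter, or latch the signal */
}

static int example_wait_for_transfer(void)
{
    /* returns OBJ_WAIT_SUCCEEDED, OBJ_WAIT_TIMEDOUT or OBJ_WAIT_FAILED */
    return wakeup_wait(&example_wkup, HZ/10);
}
#endif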