/***************************************************************************
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "system-sdl.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}
/****************************************************************************
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;   /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TIER0 = 0xf9; /* Enable GRA match interrupt */

    TSTR |= 0x01; /* Start timer 0 */
}

void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;
}
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0; /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
          /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff; /* Clear all events */

    ICR1 = 0x8c; /* Interrupt on level 3.0 */
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TER0 = 0xff; /* Clear all events */
}
#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    /* Run through the list of tick tasks (using main core) */
    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using main CPU core -
       wake up the COP through its control interface to provide pulse */
    for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if (tick_funcs[i])
            tick_funcs[i]();
    }

#if NUM_CORES > 1
    /* Pulse the COP */
    core_wake(COP);
#endif /* NUM_CORES */

    current_tick++;
}
#endif

/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    /* enable timer */
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}
#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}
#endif
int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    return -1;
}
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API

static struct timeout *tmo_list = NULL; /* list of active timeout events */
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue; /* not expired yet */

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list - unlink it */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
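
#if 0 /* Usage sketch (not part of the kernel): arming a one-shot timeout.
       * The example_ names and the event id are illustrative only, and the
       * block assumes INCLUDE_TIMEOUT_API. The callback runs from the tick
       * ISR; returning zero cancels the timeout, returning non-zero re-arms
       * it for another tmo->ticks. */
static int example_timeout_cb(struct timeout *tmo)
{
    /* tmo->data carries the value passed to timeout_register() */
    queue_post((struct event_queue *)tmo->data, 0x1234 /* example id */, 0);
    return 0; /* one-shot: do not reload */
}

static struct timeout example_tmo;

static void example_arm_timeout(struct event_queue *q)
{
    /* fire once, half a second from now */
    timeout_register(&example_tmo, example_timeout_cb, HZ/2, (intptr_t)q);
}
#endif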
/****************************************************************************
 ****************************************************************************/
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on dividers settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21; // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20); // start timer 4
    do {
        counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 * q->events[]:           |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:    | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *
 * q->send->list:         >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:     /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling which makes it
 * more efficient to reject the majority of cases that don't need this
 * done.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp != NULL)
                queue_release_sender(spp, 0);
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
    thread->wakeup_ext_cb = NULL;
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory but must be specified for
 * priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             struct thread_entry *owner)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        send->blocker.thread = owner;
        if(owner != NULL)
            q->blocker_p = &send->blocker;
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
            queue_release_sender(spp, 0);
    }
}

/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves waiting thread's reference from the senders array to the
 * curr_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not enabled */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = cores[CURRENT_CORE].running;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(q->send && q->send->curr_sender) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
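
#if 0 /* Usage sketch (not part of the kernel): synchronous messaging.
       * Names are illustrative only and the block assumes
       * HAVE_EXTENDED_MESSAGING_AND_NAME. The receiving thread owns the
       * queue and enables sending with a caller-allocated queue_sender_list;
       * senders then block in queue_send() until the owner replies. */
static struct event_queue example_q;
static struct queue_sender_list example_q_send;

enum { EXAMPLE_EV_QUERY = 1 };

static void example_owner_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true);
    /* NULL owner works, but priority inheritance needs a real owner thread */
    queue_enable_queue_send(&example_q, &example_q_send, NULL);

    while(1)
    {
        queue_wait(&example_q, &ev);

        if(ev.id == EXAMPLE_EV_QUERY)
            queue_reply(&example_q, 1); /* unblock the sender with retval 1 */
    }
}

static intptr_t example_client(void)
{
    /* blocks until the owner replies (or dequeues the next message,
       which auto-replies with 0) */
    return queue_send(&example_q, EXAMPLE_EV_QUERY, 0);
}
#endif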
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    int i;
    int oldlevel = disable_irq_save();

    corelock_lock(&all_queues.cl);

    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);

    return i;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = 0;
    MUTEX_SET_THREAD(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(current == MUTEX_GET_THREAD(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(m->locked == 0)
    {
        /* lock is open */
        MUTEX_SET_THREAD(m, current);
        m->locked = 1;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  thread_get_current()->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(m->queue == NULL)
    {
        /* no threads waiting - open the lock */
        MUTEX_SET_THREAD(m, NULL);
        m->locked = 0;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(l->thread == current)
    {
        /* current core already owns it */
        l->count++;
        return;
    }

    /* lock against other processor cores */
    corelock_lock(&l->cl);

    /* take ownership */
    l->thread = current;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == thread_get_current(),
                  "spinlock_unlock->wrong thread\n");

    if(l->count > 0)
    {
        /* this core still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(--s->count >= 0)
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
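
#if 0 /* Usage sketch (not part of the kernel): a counting semaphore used to
       * signal a worker thread. Names are illustrative only and the block
       * assumes HAVE_SEMAPHORE_OBJECTS. The semaphore starts with 0 of a
       * maximum of 10 counts, so the worker blocks in semaphore_wait()
       * until someone calls semaphore_release(). */
static struct semaphore example_sem;

static void example_semaphore_setup(void)
{
    semaphore_init(&example_sem, 10, 0); /* max 10 pending signals, none yet */
}

static void example_worker(void)
{
    while(1)
    {
        semaphore_wait(&example_sem);  /* blocks until released */
        /* ... handle one unit of work ... */
    }
}

static void example_notify(void)
{
    semaphore_release(&example_sem);   /* wakes the worker if it is blocked */
}
#endif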
/****************************************************************************
 * Simple event functions ;)
 ****************************************************************************/
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
    corelock_init(&e->cl);
}

void event_wait(struct event *e, unsigned int for_state)
{
    struct thread_entry *current;

    corelock_lock(&e->cl);

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(e->state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            corelock_unlock(&e->cl);
            return;
        }
        /* block until state matches */
    }
    else if(for_state == e->state)
    {
        /* the state being waited for is the current state */
        corelock_unlock(&e->cl);
        return;
    }

    /* block until state matches what the caller requests */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &e->cl; )
    current->bqp = &e->queues[for_state];

    disable_irq();
    block_thread(current);

    corelock_unlock(&e->cl);

    /* turn control over to next thread */
    switch_thread();
}
void event_set_state(struct event *e, unsigned int state)
{
    unsigned int result;
    int oldlevel;

    corelock_lock(&e->cl);

    if(e->state == state)
    {
        /* no change */
        corelock_unlock(&e->cl);
        return;
    }

    IF_PRIO( result = THREAD_OK; )

    oldlevel = disable_irq_save();

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            /* no thread should have ever blocked for nonsignaled */
            KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
                          "set_event_state->queue[NS]:S\n");
            /* pass to next thread and keep unsignaled - "pulse" */
            result = wakeup_thread(&e->queues[STATE_SIGNALED]);
            e->state = (result & THREAD_OK) ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            e->state = STATE_SIGNALED;
            thread_queue_wake(&e->queues[STATE_SIGNALED]);
        }
    }
    else
    {
        /* release all threads waiting for nonsignaled */

        /* no thread should have ever blocked if automatic */
        KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
                      e->automatic == 0, "set_event_state->queue[NS]:NS\n");

        e->state = STATE_NONSIGNALED;
        thread_queue_wake(&e->queues[STATE_NONSIGNALED]);
    }

    restore_irq(oldlevel);

    corelock_unlock(&e->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_EVENT_OBJECTS */
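
#if 0 /* Usage sketch (not part of the kernel): an automatic event used as a
       * pulse between two threads. Names are illustrative only and the block
       * assumes HAVE_EVENT_OBJECTS. With EVENT_AUTOMATIC, a signal wakes one
       * waiter and the event returns to the nonsignaled state by itself. */
static struct event example_event;

static void example_event_setup(void)
{
    event_init(&example_event, EVENT_AUTOMATIC); /* starts nonsignaled */
}

static void example_event_waiter(void)
{
    while(1)
    {
        event_wait(&example_event, STATE_SIGNALED); /* wait for a pulse */
        /* ... react to the pulse ... */
    }
}

static void example_event_source(void)
{
    event_set_state(&example_event, STATE_SIGNALED); /* pulse one waiter */
}
#endif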
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = 0;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(w->signalled == 0 && timeout != TIMEOUT_NOBLOCK)
    {
        struct thread_entry * current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(w->signalled == 0)
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = 0; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = 1;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
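
#if 0 /* Usage sketch (not part of the kernel): an IRQ-to-thread handshake
       * with a wakeup object. Names are illustrative only and the block
       * assumes HAVE_WAKEUP_OBJECTS. The ISR calls wakeup_signal(), which is
       * IRQ-safe, and the thread sleeps in wakeup_wait() with a timeout so
       * it can detect a missing interrupt. */
static struct wakeup example_transfer_done;

static void example_wakeup_setup(void)
{
    wakeup_init(&example_transfer_done);
}

static void example_dma_isr(void)
{
    wakeup_signal(&example_transfer_done); /* wake the waiting thread */
}

static int example_wait_for_transfer(void)
{
    /* returns OBJ_WAIT_SUCCEEDED, OBJ_WAIT_TIMEDOUT or OBJ_WAIT_FAILED */
    return wakeup_wait(&example_transfer_done, HZ/10);
}
#endif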