/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#ifdef SIMULATOR
#include "system-sdl.h"
#endif
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
static int num_tick_funcs = 0;

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
    IF_COP( struct corelock cl; )
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}
/****************************************************************************
 * Timer tick - Timer initialization and interrupt handler is defined at
 * the target level.
 ****************************************************************************/
int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();

    /* Add a task if there is room */
    if(num_tick_funcs < MAX_NUM_TICK_TASKS)
    {
        tick_funcs[num_tick_funcs++] = f;
        restore_irq(oldlevel);
        return 0;
    }

    restore_irq(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}
int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = disable_irq_save();

    /* Remove a task if it is there */
    for(i = 0; i < num_tick_funcs; i++)
    {
        if(tick_funcs[i] == f)
        {
            /* Compact function list - propagates NULL-terminator as well */
            for(; i < num_tick_funcs; i++)
                tick_funcs[i] = tick_funcs[i+1];

            num_tick_funcs--;

            restore_irq(oldlevel);
            return 0;
        }
    }

    restore_irq(oldlevel);
    return -1;
}
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    restore_irq(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
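
/* Usage sketch (illustrative only, assumes INCLUDE_TIMEOUT_API): a one-shot
 * that fires 50 ticks from now. Returning 0 from the callback lets it
 * expire; returning nonzero re-arms it with tmo->ticks, making it periodic.
 * backlight_off() is a hypothetical helper. */
#if 0
static struct timeout backlight_tmo;

static int backlight_off_cb(struct timeout *tmo)
{
    (void)tmo;
    backlight_off();
    return 0; /* one-shot: do not reload */
}

static void backlight_arm(void)
{
    timeout_register(&backlight_tmo, backlight_off_cb, 50, 0);
}
#endif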
/****************************************************************************
 * Thread stuff
 ****************************************************************************/
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on dividers settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21;    // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20);  // start timer 4
    do {
        counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread();
#elif defined(CREATIVE_ZVx) && defined(BOOTLOADER)
    long sleep_ticks = current_tick + ticks + 1;
    while (sleep_ticks > current_tick)
        switch_thread();
#else
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
#endif
}

void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread();
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/****************************************************************************
 * Sender thread queue structure that aids implementation of priority
 * inheritance on queues because the send list structure is the same as
 * for all other kernel objects:
 *
 * Example state:
 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
 * E3 was posted with queue_post
 * 4 events remain enqueued (E1-E4)
 *
 *                                 rd                 wr
 * q->events[]:          |  XX  |  E1  |  E2  |  E3  |  E4  |  XX  |
 * q->send->senders[]:   | NULL |  T1  |  T2  | NULL |  T3  | NULL |
 *                               \/     \/            \/
 * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
 * q->send->curr_sender:    /\
 *
 * Thread has E0 in its own struct queue_event.
 *
 ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling which makes it
 * more efficient to reject the majority of cases that don't need this
 * called.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp != NULL)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Callback to do extra forced removal steps from sender list in addition
 * to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
    *((struct thread_entry **)thread->retval) = NULL;
    thread->wakeup_ext_cb = NULL;
    thread->retval = 0;
}
/* Enables queue_send on the specified queue - caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this however an official owner is not compulsory but must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies.
 */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             struct thread_entry *owner)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        send->blocker.thread = owner;
        if(owner != NULL)
            q->blocker_p = &send->blocker;
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner;
}
/* Unblock a blocked thread at a given event index */
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                           unsigned int i)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[i];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
    if(send && send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&send->curr_sender, 0);
    }
}
/* Moves waiting thread's reference from the senders array to the
 * curr_sender which represents the thread waiting for a response to the
 * last message removed from the queue. This also protects the thread from
 * being bumped due to overflow which would not be a valid action since its
 * message _is_ being processed at this point. */
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
                                         unsigned int rd)
{
    if(send)
    {
        struct thread_entry **spp = &send->senders[rd];

        if(*spp)
        {
            /* Move thread reference from array to the next thread
               that queue_reply will release */
            send->curr_sender = *spp;
            (*spp)->retval = (intptr_t)spp;
            *spp = NULL;
        }
        /* else message was posted asynchronously with queue_post */
    }
}
#else
/* Empty macros for when synchronous sending is not made */
#define queue_release_all_senders(q)
#define queue_do_unblock_sender(send, i)
#define queue_do_auto_reply(send)
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    restore_irq(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0; i < all_queues.count; i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(; i < all_queues.count; i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        do
        {
            IF_COP( current->obj_cl = &q->cl; )
            current->bqp = &q->queue;

            block_thread(current);

            corelock_unlock(&q->cl);
            switch_thread();

            oldlevel = disable_irq_save();
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

    /* Get data for a waiting thread if one */
    queue_do_fetch_sender(q->send, rd);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_get_current(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* auto-reply */
    queue_do_auto_reply(q->send);

    if (q->read == q->write && ticks > 0)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = cores[CURRENT_CORE].running;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif /* 0 */
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
        /* Double-check locking */
        IF_COP( if(q->send && q->send->curr_sender) )
        {
            queue_release_sender(&q->send->curr_sender, retval);
        }

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q The event_queue to check
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0; i < all_queues.count; i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return i;
}
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->count = 0;
    m->locked = 0;
    MUTEX_SET_THREAD(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(current == MUTEX_GET_THREAD(m))
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    if(m->locked == 0)
    {
        /* lock is open */
        MUTEX_SET_THREAD(m, current);
        m->locked = 1;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  thread_get_current()->name);

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(m->queue == NULL)
    {
        /* no threads waiting - open the lock */
        MUTEX_SET_THREAD(m, NULL);
        m->locked = 0;
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
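
/* Usage sketch (illustrative only): serialize access to shared state.
 * Lock and unlock must be paired in the owning thread; the lock nests via
 * m->count for recursive calls by the same thread. */
#if 0
static struct mutex state_mutex; /* mutex_init(&state_mutex) once at init */
static long last_event_tick;

static void record_event(void)
{
    mutex_lock(&state_mutex);   /* blocks until free */
    last_event_tick = current_tick;
    mutex_unlock(&state_mutex); /* wakes first queued waiter, if any */
}
#endif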
/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if(l->thread == current)
    {
        /* current core already owns it */
        l->count++;
        return;
    }

    /* lock against other processor cores */
    corelock_lock(&l->cl);

    /* take ownership */
    l->thread = current;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == thread_get_current(),
                  "spinlock_unlock->wrong thread\n");

    if(l->count > 0)
    {
        /* this core still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    /* release lock */
    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
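
/* Usage sketch (illustrative only, multicore builds only): spinlocks fit
 * very short critical sections shared between CPU and COP, where briefly
 * spinning is cheaper than blocking and rescheduling. */
#if 0
static struct spinlock shared_sl; /* spinlock_init(&shared_sl) once at init */

static void touch_shared(void)
{
    spinlock_lock(&shared_sl);
    /* ... a few instructions on cross-core shared data ... */
    spinlock_unlock(&shared_sl);
}
#endif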
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

void semaphore_wait(struct semaphore *s)
{
    struct thread_entry *current;

    corelock_lock(&s->cl);

    if(--s->count >= 0)
    {
        /* wait satisfied */
        corelock_unlock(&s->cl);
        return;
    }

    /* too many waits - block until dequeued... */
    current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &s->cl; )
    current->bqp = &s->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&s->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

void semaphore_release(struct semaphore *s)
{
    IF_PRIO( unsigned int result = THREAD_NONE; )

    corelock_lock(&s->cl);

    if(s->count < s->max && ++s->count <= 0)
    {
        /* there should be threads in this queue */
        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
        /* a thread was queued - wake it up */
        int oldlevel = disable_irq_save();
        IF_PRIO( result = ) wakeup_thread(&s->queue);
        restore_irq(oldlevel);
    }

    corelock_unlock(&s->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
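
/* Usage sketch (illustrative only, assumes HAVE_SEMAPHORE_OBJECTS): a
 * counting handshake. Each semaphore_release adds one unit up to max; each
 * semaphore_wait consumes one, blocking while the count is exhausted. */
#if 0
static struct semaphore buf_sem; /* semaphore_init(&buf_sem, 8, 0) at init */

static void producer_thread(void)
{
    /* ... fill one buffer ... */
    semaphore_release(&buf_sem); /* one unit per completed buffer */
}

static void consumer_thread(void)
{
    semaphore_wait(&buf_sem); /* blocks until a buffer is available */
    /* ... drain one buffer ... */
}
#endif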
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
 * Lightweight IRQ-compatible wakeup object
 ****************************************************************************/

/* Initialize the wakeup object */
void wakeup_init(struct wakeup *w)
{
    w->queue = NULL;
    w->signalled = 0;
    IF_COP( corelock_init(&w->cl); )
}

/* Wait for a signal blocking indefinitely or for a specified period */
int wakeup_wait(struct wakeup *w, int timeout)
{
    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
    int oldlevel = disable_irq_save();

    corelock_lock(&w->cl);

    if(w->signalled == 0 && timeout != TIMEOUT_NOBLOCK)
    {
        struct thread_entry * current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &w->cl; )
        current->bqp = &w->queue;

        if (timeout != TIMEOUT_BLOCK)
            block_thread_w_tmo(current, timeout);
        else
            block_thread(current);

        corelock_unlock(&w->cl);
        switch_thread();

        oldlevel = disable_irq_save();
        corelock_lock(&w->cl);
    }

    if(w->signalled == 0)
    {
        /* Timed-out or failed */
        ret = (timeout != TIMEOUT_BLOCK) ?
            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
    }

    w->signalled = 0; /* Reset */

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Signal the thread waiting or leave the signal if the thread hasn't
 * waited yet.
 *
 * returns THREAD_NONE or THREAD_OK
 */
int wakeup_signal(struct wakeup *w)
{
    int oldlevel = disable_irq_save();
    int ret;

    corelock_lock(&w->cl);

    w->signalled = 1;
    ret = wakeup_thread(&w->queue);

    corelock_unlock(&w->cl);
    restore_irq(oldlevel);

    return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */
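
/* Usage sketch (illustrative only, assumes HAVE_WAKEUP_OBJECTS): an
 * IRQ-to-thread handshake. The handler signals; the thread waits with a
 * timeout so a lost interrupt cannot hang it. handle_error() is
 * hypothetical. */
#if 0
static struct wakeup xfer_wakeup; /* wakeup_init(&xfer_wakeup) once at init */

static void xfer_isr(void)
{
    wakeup_signal(&xfer_wakeup);
}

static void xfer_thread(void)
{
    if(wakeup_wait(&xfer_wakeup, HZ/2) != OBJ_WAIT_SUCCEEDED)
        handle_error();
}
#endif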