/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/* Make this nonzero to enable more elaborate checks on objects */
#ifdef DEBUG
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

extern struct core_entry cores[NUM_CORES];
/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
    struct corelock cl;
} all_queues NOCACHEBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21;    // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20);  // start timer 4
#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        ; /* busy wait */
#else
    sleep_thread(ticks);
#endif
}

void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread(true, NULL);
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 * 1) A sender should be confirmed to exist before calling which makes it
 *    more efficient to reject the majority of cases that don't need this
 *    done.
 * 2) Requires interrupts disabled since queue overflows can cause posts
 *    from interrupt handlers to wake threads. Not doing so could cause
 *    an attempt at multiple wakes or other problems.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    /* This should _never_ happen - there must never be multiple
       threads in this list and it is a corrupt state */
    KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 * Disable IRQs and lock before calling since it uses
 * queue_release_sender.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Enables queue_send on the specified queue - caller allocates the extra
   data structure. Only queues which are taken to be owned by a thread should
   enable this. Public waiting is not permitted. */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    if(send != NULL)
    {
        memset(send, 0, sizeof(*send));
        q->send = send;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
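
/* Usage sketch (illustrative only, not part of the original sources; the queue
 * and message names below are hypothetical): a queue owned by one thread
 * enables sending, the owner replies to each dequeued sent message, and
 * clients block in queue_send() until that reply arrives.
 *
 *   static struct event_queue my_q;               // hypothetical
 *   static struct queue_sender_list my_q_senders; // hypothetical
 *
 *   // owner thread setup
 *   queue_init(&my_q, true);
 *   queue_enable_queue_send(&my_q, &my_q_senders);
 *
 *   // owner thread loop
 *   struct queue_event ev;
 *   queue_wait(&my_q, &ev);
 *   queue_reply(&my_q, 1);          // releases a blocked queue_send() caller
 *
 *   // client thread
 *   intptr_t r = queue_send(&my_q, MY_MSG, 0);    // MY_MSG is hypothetical
 */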
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    thread_queue_init(&q->queue);
    q->read  = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send  = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }

        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    set_irq_level(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads for reply and reply to any dequeued
       message waiting for one. */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
            cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.flags = TBOP_SET_VARu8;
            cores[core].blk_ops.var_u8p = &q->cl.locked;
            cores[core].blk_ops.var_u8v = 0;
#endif /* CONFIG_CORELOCK */
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
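
/* Usage sketch (illustrative, not from the original sources; queue and message
 * names are hypothetical): the owning thread typically spins in a loop like
 * this. SYS_TIMEOUT only appears when queue_wait_w_tmo() is used.
 *
 *   struct queue_event ev;
 *   while (1)
 *   {
 *       queue_wait(&my_q, &ev);
 *       switch (ev.id)
 *       {
 *           case MY_MSG:
 *               handle(ev.data);
 *               break;
 *       }
 *   }
 */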
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif /* CONFIG_CORELOCK */
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if (*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        const unsigned int core = CURRENT_CORE;
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif
        block_thread_no_listlock(spp);
        return cores[core].running->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
#if NUM_CORES > 1
        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
        /* Double-check locking */
        if(q->send && q->send->curr_sender)
        {
#endif

            queue_release_sender(&q->send->curr_sender, retval);

#if NUM_CORES > 1
        }
        corelock_unlock(&q->cl);
        set_irq_level(oldlevel);
#endif
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
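
/* Illustrative sketch (not from the original sources; the queue name is
 * hypothetical): when another thread may call queue_remove_from_head()
 * concurrently, poll with a zero-tick wait instead of trusting queue_empty()
 * and then blocking.
 *
 *   struct queue_event ev;
 *   queue_wait_w_tmo(&my_q, &ev, 0);
 *   if (ev.id != SYS_TIMEOUT)
 *   {
 *       // a real message was dequeued
 *   }
 */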
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if (q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    if (q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);

    return have_msg;
}
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if (*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return the number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    set_irq_level(oldlevel);
#endif

    return i;
}
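
/* Illustrative sketch (not from the original sources; the event id is
 * hypothetical): broadcasting posts the same event to every queue that
 * registered itself with queue_init(..., true).
 *
 *   queue_broadcast(SYS_MY_EVENT, 0);
 */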
/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;   /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TIER0 = 0xf9; /* Enable GRA match interrupt */

    TSTR |= 0x01; /* Start timer 1 */
}

void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;
}
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0; /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
          /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff; /* Clear all events */

    ICR1 = 0x8c; /* Interrupt on level 3.0 */
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TER0 = 0xff; /* Clear all events */
}
#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    /* Run through the list of tick tasks (using main core) */
    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using main CPU core -
       wake up the COP through its control interface to provide pulse */
    for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if (tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

#if NUM_CORES > 1
    /* Pulse the COP */
    core_wake(COP);
#endif /* NUM_CORES */

    current_tick++;
}
#endif

/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}
#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}
#endif /* CONFIG_CPU */
int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}
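
/* Usage sketch (illustrative, not from the original sources; the task name is
 * hypothetical): a tick task runs from the timer interrupt once per tick, so
 * it must be short and must not block.
 *
 *   static void my_tick_task(void)
 *   {
 *       // poll some hardware, bump a counter, etc.
 *   }
 *   ...
 *   tick_add_task(my_tick_task);
 *   ...
 *   tick_remove_task(my_tick_task);
 */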
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    set_irq_level(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    set_irq_level(oldlevel);
}
#endif /* INCLUDE_TIMEOUT_API */
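
/* Usage sketch (illustrative, not from the original sources; the callback and
 * object names are hypothetical, and the callback shape assumes timeout_cb_type
 * takes the struct timeout pointer and returns whether to re-arm, as
 * timeout_tick() above implies): a one-shot that fires once after HZ/2 ticks.
 *
 *   static bool my_timeout_cb(struct timeout *tmo)
 *   {
 *       (void)tmo;
 *       // deferred work runs here, from the tick ISR
 *       return false;   // do not re-arm; true would reload after tmo->ticks
 *   }
 *
 *   static struct timeout my_tmo;
 *   timeout_register(&my_tmo, my_timeout_cb, HZ/2, 0);
 */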
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/
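
/* Usage sketch (illustrative, not from the original sources; the mutex name is
 * hypothetical): a mutex guards shared data between cooperating threads.
 * Lock/unlock must be paired, and since mutex_lock() can block it must not be
 * called from interrupt context.
 *
 *   static struct mutex my_mutex;
 *   ...
 *   mutex_init(&my_mutex);
 *   ...
 *   mutex_lock(&my_mutex);
 *   // touch the shared data
 *   mutex_unlock(&my_mutex);
 */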
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&m->cl);
#endif
}

void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *const thread = cores[core].running;

    if(thread == m->thread)
    {
        /* current thread already owns this mutex */
        m->count++;
        return;
    }

    /* Repeat some stuff here or else all the variation is too difficult to
       read */
#if CONFIG_CORELOCK == CORELOCK_SWAP
    /* peek at lock until it's no longer busy */
    unsigned int locked;
    while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
    if(locked == 0)
    {
        m->thread = thread;
        m->locked = 1;
        return;
    }

    /* Block until the lock is open... */
    cores[core].blk_ops.flags = TBOP_SET_VARu8;
    cores[core].blk_ops.var_u8p = &m->locked;
    cores[core].blk_ops.var_u8v = 1;
#else
    corelock_lock(&m->cl);
    if (m->locked == 0)
    {
        m->locked = 1;
        m->thread = thread;
        corelock_unlock(&m->cl);
        return;
    }

    /* Block until the lock is open... */
#if CONFIG_CORELOCK == SW_CORELOCK
    cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    cores[core].blk_ops.cl_p = &m->cl;
#endif
#endif /* CONFIG_CORELOCK */

    block_thread_no_listlock(&m->queue);
}
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running,
                  "mutex_unlock->wrong thread (recurse)");

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    /* lock out other cores */
    corelock_lock(&m->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    /* wait for peeker to move on */
    while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
#endif

    /* transfer to next queued thread if any */

    /* This can become busy using SWP but is safe since only one thread
       will be changing things at a time. Allowing timeout waits will
       change that however but not now. There is also a hazard the thread
       could be killed before performing the wakeup but that's just
       irresponsible. :-) */
    m->thread = m->queue;

    if(m->thread == NULL)
    {
        m->locked = 0; /* release lock */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&m->cl);
#endif
    }
    else /* another thread is waiting - remain locked */
    {
        wakeup_thread_no_listlock(&m->queue);
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&m->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        m->locked = 1;
#endif
    }
}
/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
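
/* Usage sketch (illustrative, not from the original sources; the lock name is
 * hypothetical): a spinlock is a lighter, corelock-based lock for very short
 * critical sections shared between cores in multi-core builds; unlike a mutex
 * it does not queue waiting threads.
 *
 *   static struct spinlock my_lock;
 *   ...
 *   spinlock_init(&my_lock);
 *   ...
 *   spinlock_lock(&my_lock);
 *   // brief critical section
 *   spinlock_unlock(&my_lock);
 */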
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    struct thread_entry *const thread = cores[CURRENT_CORE].running;

    if (l->thread == thread)
    {
        /* current thread already owns this lock */
        l->count++;
        return;
    }

    corelock_lock(&l->cl);

    l->thread = thread;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
                  "spinlock_unlock->wrong thread");

    if (l->count > 0)
    {
        /* this thread still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
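
/* Usage sketch (illustrative, not from the original sources; the semaphore
 * name is hypothetical): a counting semaphore with a maximum of 1 and an
 * initial count of 0 acts as a simple completion signal between a producer
 * and a consumer thread.
 *
 *   static struct semaphore done_sem;
 *   ...
 *   semaphore_init(&done_sem, 1, 0);
 *   ...
 *   semaphore_wait(&done_sem);      // consumer blocks until released
 *   ...
 *   semaphore_release(&done_sem);   // producer signals completion
 */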
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg");
    s->queue = NULL;
    s->max = max;
    s->count = start;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&s->cl);
#endif
}
void semaphore_wait(struct semaphore *s)
{
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&s->cl);
    if(--s->count >= 0)
    {
        corelock_unlock(&s->cl);
        return;
    }
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    int count;
    while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
    if(--count >= 0)
    {
        s->count = count;
        return;
    }
#endif

    /* too many waits - block until dequeued */
#if CONFIG_CORELOCK == SW_CORELOCK
    const unsigned int core = CURRENT_CORE;
    cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    cores[core].blk_ops.cl_p = &s->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    const unsigned int core = CURRENT_CORE;
    cores[core].blk_ops.flags = TBOP_SET_VARi;
    cores[core].blk_ops.var_ip = &s->count;
    cores[core].blk_ops.var_iv = count;
#endif
    block_thread_no_listlock(&s->queue);
}
void semaphore_release(struct semaphore *s)
{
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&s->cl);
    if (s->count < s->max)
    {
        if (++s->count <= 0)
        {
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    int count;
    while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
    if (count < s->max)
    {
        if (++count <= 0)
        {
#endif /* CONFIG_CORELOCK */

            /* there should be threads in this queue */
            KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup");
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_unlock(&s->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    s->count = count;
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
/****************************************************************************
 * Simple event functions ;)
 ****************************************************************************/
#ifdef HAVE_EVENT_OBJECTS
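
/* Usage sketch (illustrative, not from the original sources; the event name is
 * hypothetical): an automatic event passes one waiter at a time ("pulse"),
 * while a manual one stays signaled until explicitly unsignaled.
 *
 *   static struct event data_ready;
 *   ...
 *   event_init(&data_ready, EVENT_AUTOMATIC);        // starts unsignaled
 *   ...
 *   event_wait(&data_ready, STATE_SIGNALED);         // consumer blocks here
 *   ...
 *   event_set_state(&data_ready, STATE_SIGNALED);    // producer wakes one waiter
 */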
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&e->cl);
#endif
}
void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state;
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&e->cl);
    last_state = e->state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
#endif

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
#if CONFIG_CORELOCK == SW_CORELOCK
            corelock_unlock(&e->cl);
#endif
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&e->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        e->state = last_state;
#endif
        return;
    }

    {
        /* current state does not match wait-for state */
#if CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &e->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &e->state;
        cores[core].blk_ops.var_u8v = last_state;
#endif
        block_thread_no_listlock(&e->queues[for_state]);
    }
}
void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state;
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&e->cl);
    last_state = e->state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
#endif

    if(last_state == state)
    {
        /* no change */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&e->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        e->state = last_state;
#endif
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;
            /* no thread should have ever blocked for unsignaled */
            KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
                          "set_event_state->queue[NS]:S");
            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */

        /* no thread should have ever blocked if automatic */
        KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
                      e->automatic == 0, "set_event_state->queue[NS]:NS");

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_unlock(&e->cl);
#endif
}
#endif /* HAVE_EVENT_OBJECTS */