/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"

/* Make this nonzero to enable more elaborate checks on objects */
#ifdef DEBUG
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
#if NUM_CORES > 1
    struct corelock cl;
#endif
} all_queues NOCACHEBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from the
    // original firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21;    // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20);  // start timer 4
    do {
        counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread(NULL);
#else
    sleep_thread(ticks);
#endif
}
void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread(NULL);
#endif
}
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}
/* Puts the specified return value in the waiting thread's return value slot
 * and wakes the thread.
 * 1) A sender should be confirmed to exist before calling; checking at the
 *    call site makes it cheaper to reject the majority of cases that don't
 *    need this called.
 * 2) Requires interrupts disabled since queue overflows can cause posts
 *    from interrupt handlers to wake threads. Not doing so could cause
 *    an attempt at multiple wakes or other problems.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    /* This should _never_ happen - there must never be multiple
       threads in this list and it is a corrupt state */
    KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender);
}
/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 * Disable IRQs and lock before calling since it uses
 * queue_release_sender.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Enables queue_send on the specified queue - caller allocates the extra
   data structure. Only queues which are taken to be owned by a thread should
   enable this. Public waiting is not permitted. */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    q->send = NULL;
    if(send != NULL)
    {
        memset(send, 0, sizeof(*send));
        q->send = send;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    thread_queue_init(&q->queue);
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    set_irq_level(oldlevel);
}
/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads for reply and reply to any dequeued
       message waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
            cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.flags = TBOP_SET_VARu8;
            cores[core].blk_ops.var_u8p = &q->cl.locked;
            cores[core].blk_ops.var_u8v = 0;
#endif /* CONFIG_CORELOCK */
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
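/* Usage sketch (illustrative only; the queue, event IDs and function names
 * below are hypothetical, not part of this file): the usual pattern is that
 * one thread owns a queue and blocks on it in a loop, while other threads
 * or interrupt handlers deliver events to it with queue_post.
 */
#if 0
static struct event_queue example_q;

enum { EXAMPLE_EV_REFRESH = 1, EXAMPLE_EV_QUIT };

static void example_thread(void)
{
    struct queue_event ev;

    queue_init(&example_q, true); /* register so broadcasts reach it */

    while(1)
    {
        queue_wait(&example_q, &ev); /* blocks until something is posted */

        switch(ev.id)
        {
        case EXAMPLE_EV_REFRESH:
            /* ev.data carries the intptr_t passed to queue_post */
            break;
        case EXAMPLE_EV_QUIT:
            return;
        }
    }
}

/* From any other context:
   queue_post(&example_q, EXAMPLE_EV_REFRESH, 0); */
#endif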
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
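/* Usage sketch (illustrative only; reuses the hypothetical example_q from the
 * sketch above): the timed wait hands back an event with id SYS_TIMEOUT if
 * nothing arrives within 'ticks', which is convenient for periodic
 * housekeeping in an otherwise event-driven thread.
 */
#if 0
static void example_poll_thread(void)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait_w_tmo(&example_q, &ev, HZ); /* wait at most one second */

        if(ev.id == SYS_TIMEOUT)
        {
            /* nothing was posted for a full second - do periodic work */
            continue;
        }

        /* handle ev.id / ev.data as in the plain queue_wait loop */
    }
}
#endif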
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if (*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        const unsigned int core = CURRENT_CORE;
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif
        block_thread_no_listlock(spp);
        return cores[core].running->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);

    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
#endif

    return in_send;
}
#endif
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
#if NUM_CORES > 1
        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
        /* Double-check locking */
        if(q->send && q->send->curr_sender)
        {
#endif

            queue_release_sender(&q->send->curr_sender, retval);

#if NUM_CORES > 1
        }
        corelock_unlock(&q->cl);
        set_irq_level(oldlevel);
#endif
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
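/* Usage sketch (illustrative only; queue, command ID and function names are
 * hypothetical): queue_send blocks the caller until the owning thread answers
 * with queue_reply (an overflowed slot, or the owner's next queue_wait, replies
 * 0 automatically). The owning thread must first enable sending with
 * queue_enable_queue_send, and queue_send must never be called from an IRQ
 * handler.
 */
#if 0
static struct event_queue server_q;
static struct queue_sender_list server_q_senders;

enum { SERVER_CMD_QUERY = 1 };

static void server_thread(void)
{
    struct queue_event ev;

    queue_init(&server_q, true);
    queue_enable_queue_send(&server_q, &server_q_senders);

    while(1)
    {
        queue_wait(&server_q, &ev);

        if(ev.id == SERVER_CMD_QUERY)
            queue_reply(&server_q, 42); /* value handed back to the sender */
    }
}

/* In a client thread:
   intptr_t result = queue_send(&server_q, SERVER_CMD_QUERY, 0); */
#endif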
/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if (q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    if (q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);

    return have_msg;
}
void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if (*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to check
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    set_irq_level(oldlevel);
#endif

    return i;
}
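/* Usage sketch (illustrative only; the event ID is the hypothetical one from
 * the earlier sketches): a broadcast posts the same event to every queue that
 * was registered via queue_init(q, true) and returns how many queues it
 * posted to.
 */
#if 0
static void example_notify_all(void)
{
    int posted = queue_broadcast(EXAMPLE_EV_REFRESH, 0);
    (void)posted; /* number of registered queues that received the event */
}
#endif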
/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;     /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23;   /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TSR0 &= ~0x01;
    TIER0 = 0xf9;  /* Enable GRA match interrupt */

    TSTR |= 0x01;  /* Start timer 0 */
}

void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TSR0 &= ~0x01;
}
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0;                           /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
          /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff;  /* Clear all events */

    ICR1 = 0x8c;  /* Interrupt on level 3.0 */
    IMR &= ~0x200;
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TER0 = 0xff; /* Clear all events */
}
#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using the main CPU core -
       wake up the COP through its control interface to provide the pulse */
    for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if (tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

#if NUM_CORES > 1
    /* Pulse the COP */
    core_wake(COP);
#endif /* NUM_CORES */

    current_tick++;
}
#endif

/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    TIMER1_CFG = 0x0;
    TIMER1_VAL;
    /* enable timer */
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}
#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TIMER0.clr = 0;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}
#endif
int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}
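/* Usage sketch (illustrative only; names are hypothetical): a tick task runs
 * from the timer interrupt on every tick (HZ times per second), so it must be
 * short and must never block.
 */
#if 0
static volatile long example_tick_counter = 0;

static void example_tick_task(void)
{
    example_tick_counter++; /* keep it this cheap - we are in an ISR */
}

static void example_enable_counting(bool on)
{
    if(on)
        tick_add_task(example_tick_task);
    else
        tick_remove_task(example_tick_task);
}
#endif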
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    set_irq_level(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    set_irq_level(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
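/* Usage sketch (illustrative only; names are hypothetical, and the bool
 * return type assumes the timeout_cb_type declared in kernel.h): the caller
 * owns the struct timeout storage, the callback runs in tick ISR context,
 * and a zero/false return cancels the timeout while non-zero re-arms it for
 * another 'ticks' interval, as timeout_tick above shows.
 */
#if 0
static struct timeout example_tmo;

static bool example_timeout_cb(struct timeout *tmo)
{
    /* tmo->data holds the intptr_t passed to timeout_register */
    return false; /* one-shot: do not reload */
}

static void example_arm_timeout(void)
{
    /* fire roughly half a second from now */
    timeout_register(&example_tmo, example_timeout_cb, HZ/2, 0);
}
#endif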
/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&m->cl);
#endif
}

void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *const thread = cores[core].running;

    if(thread == m->thread)
    {
        m->count++;
        return;
    }

    /* Repeat some stuff here or else all the variation is too difficult to
       read */
#if CONFIG_CORELOCK == CORELOCK_SWAP
    /* peek at lock until it's no longer busy */
    unsigned int locked;
    while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
    if(locked == 0)
    {
        m->thread = thread;
        m->locked = 1;
        return;
    }

    /* Block until the lock is open... */
    cores[core].blk_ops.flags = TBOP_SET_VARu8;
    cores[core].blk_ops.var_u8p = &m->locked;
    cores[core].blk_ops.var_u8v = 1;
#else
    corelock_lock(&m->cl);
    if (m->locked == 0)
    {
        m->locked = 1;
        m->thread = thread;
        corelock_unlock(&m->cl);
        return;
    }

    /* Block until the lock is open... */
#if CONFIG_CORELOCK == SW_CORELOCK
    cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    cores[core].blk_ops.cl_p = &m->cl;
#endif
#endif /* CONFIG_CORELOCK */

    block_thread_no_listlock(&m->queue);
}

void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running,
                  "mutex_unlock->wrong thread (recurse)");

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    /* lock out other cores */
    corelock_lock(&m->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    /* wait for peeker to move on */
    while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
#endif

    /* transfer to next queued thread if any */

    /* This can become busy using SWP but is safe since only one thread
       will be changing things at a time. Allowing timeout waits will
       change that however but not now. There is also a hazard the thread
       could be killed before performing the wakeup but that's just
       irresponsible. :-) */
    m->thread = m->queue;

    if(m->thread == NULL)
    {
        m->locked = 0; /* release lock */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&m->cl);
#endif
    }
    else /* another thread is waiting - remain locked */
    {
        wakeup_thread_no_listlock(&m->queue);
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&m->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        m->locked = 1;
#endif
    }
}
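/* Usage sketch (illustrative only; names are hypothetical): a mutex
 * serializes thread contexts around shared state. It is recursive for the
 * owning thread (see the count handling above) and must be unlocked by the
 * thread that locked it; since mutex_lock may block, it is for thread
 * context only, not interrupt handlers.
 */
#if 0
static struct mutex example_mtx;
static int example_shared_value;

static void example_shared_init(void)
{
    mutex_init(&example_mtx);
}

static void example_shared_bump(void)
{
    mutex_lock(&example_mtx);
    example_shared_value++; /* protected region */
    mutex_unlock(&example_mtx);
}
#endif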
/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    struct thread_entry *const thread = cores[CURRENT_CORE].running;

    if (l->thread == thread)
    {
        l->count++;
        return;
    }

    corelock_lock(&l->cl);

    l->thread = thread;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
                  "spinlock_unlock->wrong thread");

    if (l->count > 0)
    {
        /* this thread still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    /* release lock */
    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg");
    s->queue = NULL;
    s->max = max;
    s->count = start;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&s->cl);
#endif
}

void semaphore_wait(struct semaphore *s)
{
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&s->cl);
    if(--s->count >= 0)
    {
        corelock_unlock(&s->cl);
        return;
    }
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    int count;
    while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
    if(--count >= 0)
    {
        s->count = count;
        return;
    }
#endif

    /* too many waits - block until dequeued */
#if CONFIG_CORELOCK == SW_CORELOCK
    const unsigned int core = CURRENT_CORE;
    cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    cores[core].blk_ops.cl_p = &s->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    const unsigned int core = CURRENT_CORE;
    cores[core].blk_ops.flags = TBOP_SET_VARi;
    cores[core].blk_ops.var_ip = &s->count;
    cores[core].blk_ops.var_iv = count;
#endif
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&s->cl);
    if (s->count < s->max)
    {
        if (++s->count <= 0)
        {
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    int count;
    while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
    if(count < s->max)
    {
        if(++count <= 0)
        {
#endif /* CONFIG_CORELOCK */

            /* there should be threads in this queue */
            KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup");
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_unlock(&s->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    s->count = count;
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
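/* Usage sketch (illustrative only; names are hypothetical): a counting
 * semaphore initialized with start == 0 makes semaphore_wait block until a
 * matching semaphore_release, which gives the usual producer/consumer
 * hand-off.
 */
#if 0
static struct semaphore example_sem;

static void example_sem_setup(void)
{
    /* at most 1 outstanding "item", none available yet */
    semaphore_init(&example_sem, 1, 0);
}

static void example_producer(void)
{
    /* make one unit of work available; wakes a blocked consumer if any */
    semaphore_release(&example_sem);
}

static void example_consumer(void)
{
    semaphore_wait(&example_sem); /* blocks until the producer releases */
    /* ... consume the work item ... */
}
#endif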
/****************************************************************************
 * Simple event functions ;)
 ****************************************************************************/
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&e->cl);
#endif
}

void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state;
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&e->cl);
    last_state = e->state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
#endif

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
#if CONFIG_CORELOCK == SW_CORELOCK
            corelock_unlock(&e->cl);
#endif
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&e->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        e->state = last_state;
#endif
        return;
    }

    {
        /* current state does not match wait-for state */
#if CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &e->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &e->state;
        cores[core].blk_ops.var_u8v = last_state;
#endif
        block_thread_no_listlock(&e->queues[for_state]);
    }
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state;
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&e->cl);
    last_state = e->state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
#endif

    if(last_state == state)
    {
        /* no change */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&e->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        e->state = last_state;
#endif
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;
            /* no thread should have ever blocked for unsignaled */
            KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
                          "set_event_state->queue[NS]:S");
            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */

        /* no thread should have ever blocked if automatic */
        KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
                      e->automatic == 0, "set_event_state->queue[NS]:NS");

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_unlock(&e->cl);
#endif
}
#endif /* HAVE_EVENT_OBJECTS */
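/* Usage sketch (illustrative only; names are hypothetical): a manual event
 * (flags == 0) stays signaled until explicitly set back and releases every
 * waiter, while an EVENT_AUTOMATIC event hands a single "pulse" to one
 * waiter and reverts to nonsignaled, as event_set_state above shows.
 */
#if 0
static struct event example_done;

static void example_event_setup(void)
{
    /* manual event, initially nonsignaled */
    event_init(&example_done, 0);
}

static void example_worker(void)
{
    /* ... do the work ... */
    event_set_state(&example_done, STATE_SIGNALED); /* release all waiters */
}

static void example_waiter(void)
{
    event_wait(&example_done, STATE_SIGNALED); /* blocks until signaled */
}
#endif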