/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#if CONFIG_CPU == IMX31L
#include "avic-imx31.h"
#endif
#if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct event_queue *all_queues[32] NOCACHEBSS_ATTR;
static int num_queues NOCACHEBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall-through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));

        num_queues = 0;
        memset(all_queues, 0, sizeof(all_queues));

        tick_start(1000/HZ);
    }
}
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21;    // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20);  // start timer 4
    do {
        counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread(true, NULL);
#else
    sleep_thread(ticks);
#endif
}
void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread(true, NULL);
#endif
}
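
/* Illustrative sketch (not part of the original file): how the sleep() and
 * yield() primitives above are typically used from a thread.  TIME_AFTER()
 * comes from kernel.h; example_device_ready() is a made-up helper used only
 * to give the loop something to poll. */
#if 0
static bool example_device_ready(void);  /* hypothetical */

static void example_wait_for_device(void)
{
    long give_up = current_tick + 5*HZ;  /* stop trying after ~5 seconds */

    while (!example_device_ready())
    {
        if (TIME_AFTER(current_tick, give_up))
            break;          /* timed out */
        sleep(HZ/10);       /* block this thread for ~100 ms worth of ticks */
    }
}
#endif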
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves a waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if (*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 * 1) A sender should be confirmed to exist before calling, which makes it
 *    more efficient to reject the majority of cases that don't need this
 *    called.
 * 2) Requires interrupts disabled, since queue overflows can cause posts
 *    from interrupt handlers to wake threads. Not doing so could cause
 *    an attempt at multiple wakes or other problems. */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_irq_safe(sender);
#if 0
    /* This should _never_ happen - there must never be multiple
       threads in this list and it is a corrupt state */
    if (*sender != NULL)
        panicf("Queue: send slot ovf");
#endif
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 * Disable IRQs before calling, since it uses queue_release_sender. */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    q->send = send;
    memset(send, 0, sizeof(struct queue_sender_list));
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
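
/* Illustrative sketch (not part of the original file): the send/reply pattern
 * the structures above enable.  A receiving thread owns the queue and calls
 * queue_reply() on messages posted with queue_send(); the sender blocks until
 * that reply arrives.  Names prefixed "example_" are made up for illustration. */
#if 0
static struct event_queue example_q;
static struct queue_sender_list example_q_senders;

#define EXAMPLE_MSG_PING 1

static void example_owner_init(void)
{
    queue_init(&example_q, true);
    queue_enable_queue_send(&example_q, &example_q_senders);
}

static void example_owner_loop(void)
{
    struct event ev;
    while (1)
    {
        queue_wait(&example_q, &ev);
        if (ev.id == EXAMPLE_MSG_PING)
            queue_reply(&example_q, 42); /* sender's queue_send() returns 42 */
    }
}

static void example_client(void)
{
    intptr_t result = queue_send(&example_q, EXAMPLE_MSG_PING, 0);
    (void)result; /* 42, once the owner thread has replied */
}
#endif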
void queue_init(struct event_queue *q, bool register_queue)
{
    q->read   = 0;
    q->write  = 0;
    q->thread = NULL;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send   = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
    }
}

void queue_delete(struct event_queue *q)
{
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Release threads waiting on queue */
    wakeup_thread(&q->thread);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    {
        if(all_queues[i] == q)
        {
            found = true;
            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    set_irq_level(oldlevel);
}
void queue_wait(struct event_queue *q, struct event *ev)
{
    int oldlevel;
    unsigned int rd;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        set_irq_level_and_block_thread(&q->thread, oldlevel);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    set_irq_level(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
        set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    set_irq_level(oldlevel);
}
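
/* Illustrative sketch (not part of the original file): a typical event loop
 * built on queue_wait_w_tmo(), doing periodic housekeeping when no message
 * arrives within one second.  Identifiers prefixed "example_" are made up. */
#if 0
static struct event_queue example_queue;

#define EXAMPLE_MSG_STOP 1

static void example_thread(void)
{
    struct event ev;

    queue_init(&example_queue, true);

    while (1)
    {
        queue_wait_w_tmo(&example_queue, &ev, HZ);

        switch (ev.id)
        {
        case EXAMPLE_MSG_STOP:
            return;
        case SYS_TIMEOUT:
            /* no event for one second - do periodic work here */
            break;
        }
    }
}

/* Another thread would signal it with:
   queue_post(&example_queue, EXAMPLE_MSG_STOP, 0); */
#endif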
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    unsigned int wr;

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if (*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread_irq_safe(&q->thread);
    set_irq_level(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed to
   use this function - we only aim to protect the queue integrity by
   turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    unsigned int wr;

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if (*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->thread);
        set_irq_level_and_block_thread(spp, oldlevel);
        return thread_get_current()->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->thread);
    set_irq_level(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    /* No IRQ lock here since IRQs cannot change this */
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       a dequeued sent message will be handled by the owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if (*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to inspect
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int i;

    for(i = 0;i < num_queues;i++)
    {
        queue_post(all_queues[i], id, data);
    }

    return num_queues;
}
/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;   /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TSR0 &= ~0x01;
    TIER0 = 0xf9; /* Enable GRA match interrupt */

    TSTR |= 0x01; /* Start timer 0 */
}
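
/* Worked example of the count formula above (assuming the SH1 targets'
 * CPU_FREQ of 11059200 Hz, an assumption made only for illustration):
 * a 10 ms tick gives count = 11059200 * 10 / 1000 / 8 = 13824, which fits
 * comfortably in the 16-bit GRA0 compare register. */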
void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TSR0 &= ~0x01;
}
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0;                           /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
                  /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff; /* Clear all events */

    ICR1 = 0x8c; /* Interrupt on level 3.0 */
    IMR &= ~0x200;
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TER0 = 0xff; /* Clear all events */
}
#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using the main CPU core -
       wake up the COP through its control interface to provide a pulse */
    for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if (tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

#if NUM_CORES > 1
#ifdef CPU_PP502x
    {
        /* If COP is sleeping - give it a kick */
        /* TODO: Use a mailbox in addition to make sure it doesn't go to
         * sleep if kicked just as it's headed to rest, so that its
         * tick checks won't be jittery. Don't bother at all if it owns no
         * threads. */
        unsigned int cop_ctl;

        cop_ctl = COP_CTL;
        if (cop_ctl & PROC_SLEEP)
        {
            COP_CTL = cop_ctl & ~PROC_SLEEP;
        }
    }
#else
    /* TODO: PP5002 */
#endif
#endif /* NUM_CORES */

    current_tick++;
}
#endif

/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    TIMER1_CFG = 0x0;
    TIMER1_VAL;
    /* enable timer */
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}
#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TIMER0.clr = 0;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}
#elif CONFIG_CPU == IMX31L
void tick_start(unsigned int interval_in_ms)
{
    EPITCR1 &= ~0x1;    /* Disable the counter */

    EPITCR1 &= ~0xE;    /* Disable interrupt, count down from 0xFFFFFFFF */
    EPITCR1 &= ~0xFFF0; /* Clear prescaler */
#ifdef BOOTLOADER
    EPITCR1 |= (2700 << 2); /* Prescaler = 2700 */
#endif
    EPITCR1 &= ~(0x3 << 24);
    EPITCR1 |= (0x2 << 24); /* Set clock source to external clock (27 MHz) */
    EPITSR1 = 1;            /* Clear the interrupt request */
#ifndef BOOTLOADER
    EPITLR1 = 27000000 * interval_in_ms / 1000;
    EPITCMPR1 = 27000000 * interval_in_ms / 1000;
#else
    (void)interval_in_ms;
#endif

    //avic_enable_int(EPIT1, IRQ, EPIT_HANDLER);

    EPITCR1 |= 0x1;     /* Enable the counter */
}

#ifndef BOOTLOADER
void EPIT_HANDLER(void) __attribute__((interrupt("IRQ")));
void EPIT_HANDLER(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    EPITSR1 = 1; /* Clear the interrupt request */
}
#endif
#endif
int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}
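
/* Illustrative sketch (not part of the original file): registering a tick
 * task.  The function runs in interrupt context on every tick, so it must be
 * short and must not block; the "example_" names are made up. */
#if 0
static volatile long example_counter = 0;

static void example_tick_task(void)
{
    /* runs HZ times per second from the tick interrupt */
    example_counter++;
}

static void example_setup(void)
{
    tick_add_task(example_tick_task);    /* panics if all slots are taken */
}

static void example_teardown(void)
{
    tick_remove_task(example_tick_task);
}
#endif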
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation. */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    set_irq_level(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    set_irq_level(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
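
/* Illustrative sketch (not part of the original file): a one-shot timeout.
 * The callback runs from the timeout tick task roughly "ticks" ticks after
 * registration; returning false lets it expire, returning true would re-arm
 * it for another interval.  The bool return type and the "example_" names
 * are assumptions made for illustration. */
#if 0
static struct timeout example_tmo;

static bool example_timeout_cb(struct timeout *tmo)
{
    /* called in interrupt context about 2 seconds after registration */
    (void)tmo->data;  /* the intptr_t passed to timeout_register() */
    return false;     /* one-shot: do not reload */
}

static void example_arm_timeout(void)
{
    timeout_register(&example_tmo, example_timeout_cb, 2*HZ, 0);
}
#endif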
#ifndef SIMULATOR
/*
 * Simulator versions in uisimulator/SIMVER/
 */

/****************************************************************************
 * Simple mutex functions
 ****************************************************************************/
void mutex_init(struct mutex *m)
{
    m->locked = false;
    m->thread = NULL;
}

void mutex_lock(struct mutex *m)
{
    if (test_and_set(&m->locked, 1))
    {
        /* Wait until the lock is open... */
        block_thread(&m->thread);
    }
}

void mutex_unlock(struct mutex *m)
{
    if (m->thread == NULL)
        m->locked = 0;
    else
        wakeup_thread(&m->thread);
}

void spinlock_lock(struct mutex *m)
{
    while (test_and_set(&m->locked, 1))
    {
        /* wait until the lock is open... */
        switch_thread(true, NULL);
    }
}

void spinlock_unlock(struct mutex *m)
{
    m->locked = 0;
}
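
/* Illustrative sketch (not part of the original file): serialising access to
 * shared state with the mutex primitives above.  The "example_" identifiers
 * are made up for illustration. */
#if 0
static struct mutex example_lock;    /* call mutex_init(&example_lock) once at startup */
static int example_shared_value;

static void example_update(int v)
{
    mutex_lock(&example_lock);       /* blocks this thread if already held */
    example_shared_value = v;
    mutex_unlock(&example_lock);     /* wakes one waiting thread, if any */
}
#endif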
#endif /* ndef SIMULATOR */