"Mosaic" is in fact spelled "Mosaique". Just change the name for now.
[Rockbox.git] / firmware / thread.c
blob205375a44d3e9897295cf0425ba8c3a94f059cf5
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
struct core_entry cores[NUM_CORES] IBSS_ATTR;
#ifdef HAVE_PRIORITY_SCHEDULING
static unsigned short highest_priority IBSS_ATTR;
#endif

/* Define to enable additional checks for blocking violations etc. */
// #define THREAD_EXTRA_CHECKS
static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];

#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the
   threading is. No threads are run on the coprocessor, so set up a dummy
   stack for it. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif
/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;

static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr) __attribute__ ((always_inline));
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr }\n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]         \n" /* load start pointer */
        "mov    r1, #0                \n"
        "cmp    r0, r1                \n" /* check for NULL */
        "strne  r1, [%0, #40]         \n" /* if not NULL, clear the start pointer... */
        "movne  pc, r0                \n" /* ...and jump to the start address */
        : : "r" (addr) : "r0", "r1"
    );
}
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   .running                      \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    ".running:                                 \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0   \n"
        "sts.l   pr, @-%0 \n"
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @%0+,r8  \n"
        "mov.l  @%0+,r9  \n"
        "mov.l  @%0+,r10 \n"
        "mov.l  @%0+,r11 \n"
        "mov.l  @%0+,r12 \n"
        "mov.l  @%0+,r13 \n"
        "mov.l  @%0+,r14 \n"
        "mov.l  @%0+,r15 \n"
        "lds.l  @%0+,pr  \n"
        "mov.l  @%0,r0   \n" /* Get start address */
        "tst    r0,r0    \n"
        "bt     .running \n" /* NULL -> already running */
        "lds    r0,pr    \n"
        "mov    #0,r0    \n"
        "rts             \n" /* Start the thread */
        "mov.l  r0,@%0   \n" /* Clear start address */
    ".running:           \n"
        : : "r" (addr) : "r0" /* only! */
    );
}
#elif CONFIG_CPU == TCC730
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
#define store_context(addr)                     \
    __asm__ volatile (                          \
        "push r0,r1\n\t"                        \
        "push r2,r3\n\t"                        \
        "push r4,r5\n\t"                        \
        "push r6,r7\n\t"                        \
        "push a8,a9\n\t"                        \
        "push a10,a11\n\t"                      \
        "push a12,a13\n\t"                      \
        "push a14\n\t"                          \
        "ldw @[%0+0], a15\n\t" : : "a" (addr) );

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
#define load_context(addr)                      \
{                                               \
    if (!(addr)->started) {                     \
        (addr)->started = 1;                    \
        __asm__ volatile (                      \
            "ldw a15, @[%0+0]\n\t"              \
            "ldw a14, @[%0+4]\n\t"              \
            "jmp a14\n\t" : : "a" (addr)        \
        );                                      \
    } else                                      \
        __asm__ volatile (                      \
            "ldw a15, @[%0+0]\n\t"              \
            "pop a14\n\t"                       \
            "pop a13,a12\n\t"                   \
            "pop a11,a10\n\t"                   \
            "pop a9,a8\n\t"                     \
            "pop r7,r6\n\t"                     \
            "pop r5,r4\n\t"                     \
            "pop r3,r2\n\t"                     \
            "pop r1,r0\n\t" : : "a" (addr)      \
        );                                      \
}

#endif
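
/*---------------------------------------------------------------------------
 * Add a thread to a circular, doubly-linked list. An empty list makes the
 * thread its own neighbour; otherwise the thread is inserted at the tail.
 *---------------------------------------------------------------------------
 */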
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
        */
    }
}
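
/*---------------------------------------------------------------------------
 * Unlink a thread from a list, clearing the list head when the thread was
 * its only member. A NULL list skips the head bookkeeping.
 *---------------------------------------------------------------------------
 */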
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
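
/*---------------------------------------------------------------------------
 * Move any sleeping threads whose timeout has expired back onto the list
 * of running threads.
 *---------------------------------------------------------------------------
 */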
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    if (current == NULL)
        return;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}
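
/*---------------------------------------------------------------------------
 * Wake expired sleepers once per tick, and put the core into its low-power
 * sleep state whenever no thread is left running.
 *---------------------------------------------------------------------------
 */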
static inline void sleep_core(void)
{
    static long last_tick = 0;

    for (;;)
    {
        if (last_tick != current_tick)
        {
            check_sleepers();
            last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
#ifdef CPU_COLDFIRE
        asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
        and_b(0x7F, &SBYCR);
        asm volatile ("sleep");
#elif CONFIG_CPU == PP5020
        /* This should sleep the CPU. It appears to wake by itself on
           interrupts */
        CPU_CTL = 0x80000000;
#elif CONFIG_CPU == TCC730
        /* Sleep mode is triggered by the SYS instr on CalmRisc16.
         * Unfortunately, the manual doesn't specify which arg to use:
         *     __asm__ volatile ("sys #0x0f");
         * 0x1f seems to trigger a reset;
         * 0x0f is the only other argument used by Archos.
         */
#elif CONFIG_CPU == S3C2440
        CLKCON |= 2;
#endif
    }
}
#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&cores[CURRENT_CORE].threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif
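
/*---------------------------------------------------------------------------
 * Move the current thread off the running list and onto either the given
 * blocked list or the core's list of sleeping threads, depending on the
 * state change it requested.
 *---------------------------------------------------------------------------
 */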
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    remove_from_list(&cores[CURRENT_CORE].running, old);

    /* And put the thread into a new list of inactive threads. */
    if (GET_STATE(old->statearg) == STATE_BLOCKED)
        add_to_list(blocked_list, old);
    else
        add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Reset priorities */
    if (old->priority == highest_priority)
        highest_priority = 100;
#endif
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

# if CONFIG_CPU != TCC730
        /* Check if the current thread stack is overflown */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
            panicf("Stkov %s", cores[CURRENT_CORE].running->name);
# endif

        /* Check if a thread state change has been requested. */
        if (cores[CURRENT_CORE].running->statearg)
        {
            /* Change running thread state and switch to next thread. */
            change_thread_state(blocked_list);
        }
        else
        {
            /* Switch to the next running thread. */
            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. This also puts the core into a sleep state
     * until there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < highest_priority)
            highest_priority = priority;

        if (priority == highest_priority || (current_tick
            - cores[CURRENT_CORE].running->last_run > priority * 8))
        {
            break;
        }

        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
#endif

#endif

    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
#endif
}
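
/*---------------------------------------------------------------------------
 * Put the current thread to sleep for at least the given number of ticks.
 *---------------------------------------------------------------------------
 */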
void sleep_thread(int ticks)
{
    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    cores[CURRENT_CORE].running->statearg =
        SET_STATE(STATE_SLEEPING, current_tick + ticks + 1);
    switch_thread(true, NULL);

    /* Clear all flags to indicate we are up and running again. */
    cores[CURRENT_CORE].running->statearg = 0;
}
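
/*---------------------------------------------------------------------------
 * Block the current thread on the given list, either indefinitely (timeout
 * of zero) or until the timeout in ticks expires.
 *---------------------------------------------------------------------------
 */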
void block_thread(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    /* At the next task switch the scheduler will immediately change the
     * thread state (and we also force the task switch to happen). */
    if (timeout)
    {
#ifdef THREAD_EXTRA_CHECKS
        /* We can store only one thread on the "list" if the thread is used
         * in another list (such as the core's list of sleeping tasks). */
        if (*list)
            panicf("Blocking violation T->*B");
#endif

        current->statearg =
            SET_STATE(STATE_BLOCKED_W_TMO, current_tick + timeout);
        *list = current;

        /* Now force a task switch and block until we have been woken up
         * by another thread or the timeout is reached. */
        switch_thread(true, NULL);

        /* If the timeout is reached, we must set the list back to NULL here. */
        *list = NULL;
    }
    else
    {
#ifdef THREAD_EXTRA_CHECKS
        /* We are not allowed to mix blocking types in one queue. */
        if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
            panicf("Blocking violation B->*T");
#endif

        current->statearg = SET_STATE(STATE_BLOCKED, 0);

        /* Now force a task switch and block until we have been woken up
         * by another thread. */
        switch_thread(true, list);
    }

    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
}
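
/*---------------------------------------------------------------------------
 * Wake up the thread at the head of the given blocked list, if any.
 *---------------------------------------------------------------------------
 */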
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
        return;

    /* Wake up the thread at the head of the list. */
    thread = *list;

    /* Determine the thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. */
            remove_from_list(list, thread);
            add_to_list(&cores[CURRENT_CORE].running, thread);
            thread->statearg = 0;
            break;

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause the scheduler to immediately
             * wake up the thread. */
            thread->statearg &= 0xC0000000;
            *list = NULL;
            break;

        default:
            /* Nothing to do. The thread has already been woken up, or its
             * state is neither blocked nor blocked with timeout. */
            return;
    }
}
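
/* A minimal sketch of the intended block/wakeup pairing, assuming one
 * consumer and one producer thread sharing a single list pointer (the
 * kernel's queue code is the real user of this pattern):
 *
 *     static struct thread_entry *waiter = NULL;
 *
 *     consumer:  block_thread(&waiter, 0);   -- wait until woken
 *     producer:  wakeup_thread(&waiter);     -- make the consumer runnable
 */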
/*---------------------------------------------------------------------------
 * Create a thread on the current core.
 * Return the thread entry if the context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority))
{
    return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
                                 name IF_PRIO(, priority));
}
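
/* Typical usage (an illustrative sketch only -- the stack, thread body and
 * name below are invented for this example; the priority constants come
 * from thread.h):
 *
 *     static long my_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *     static void my_thread(void)
 *     {
 *         for (;;)
 *             sleep_thread(HZ);
 *     }
 *
 *     create_thread(my_thread, my_stack, sizeof(my_stack),
 *                   "mythread" IF_PRIO(, PRIORITY_BACKGROUND));
 */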
/*---------------------------------------------------------------------------
 * Create a thread on a specific core.
 * Return the thread entry if the context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread_on_core(unsigned int core, void (*function)(void),
                          void* stack, int stack_size,
                          const char *name IF_PRIO(, int priority))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int n;
    struct regs *regs;
    struct thread_entry *thread;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (cores[core].threads[n].name == NULL)
            break;
    }

    if (n == MAXTHREADS)
        return NULL;

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &cores[core].threads[n];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority = priority;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, thread);

    regs = &thread->context;
#if defined(CPU_COLDFIRE) || (CONFIG_CPU == SH7034) || defined(CPU_ARM)
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
#elif CONFIG_CPU == TCC730
    /* Align stack on word boundary */
    regs->sp = (void*)(((unsigned long)stack + stack_size - 2) & ~1);
    regs->started = 0;
#endif
    regs->start = (void*)function;

    return thread;
}
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the thread entry as returned from create_thread(); a NULL
 * argument removes the current thread.
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    highest_priority = 100;
#endif

    if (thread == cores[CURRENT_CORE].running)
    {
        remove_from_list(&cores[CURRENT_CORE].running, thread);
        switch_thread(false, NULL);
        return;
    }

    if (thread == cores[CURRENT_CORE].sleeping)
        remove_from_list(&cores[CURRENT_CORE].sleeping, thread);
    else
        remove_from_list(NULL, thread);
}
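
/*---------------------------------------------------------------------------
 * Change the priority of a thread (NULL means the current thread) and force
 * the scheduler to re-evaluate the highest priority on the next switch.
 *---------------------------------------------------------------------------
 */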
#ifdef HAVE_PRIORITY_SCHEDULING
void thread_set_priority(struct thread_entry *thread, int priority)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    thread->priority = priority;
    highest_priority = 100;
}
#endif
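
/*---------------------------------------------------------------------------
 * Initialise the scheduler state and register the already-running context
 * as the "main" thread.
 *---------------------------------------------------------------------------
 */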
void init_threads(void)
{
    unsigned int core = CURRENT_CORE;

    memset(cores, 0, sizeof cores);
    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].threads[0].name = main_thread_name;
    cores[core].threads[0].statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, &cores[core].threads[0]);

    /* In multiple core setups, each core has a different stack. There is
       probably a much better way to do this. */
    if (core == CPU)
    {
        cores[CPU].threads[0].stack = stackbegin;
        cores[CPU].threads[0].stack_size = (int)stackend - (int)stackbegin;
    } else {
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        cores[COP].threads[0].stack = cop_stackbegin;
        cores[COP].threads[0].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
#endif
    }
#if CONFIG_CPU == TCC730
    cores[core].threads[0].context.started = 1;
#else
    cores[core].threads[0].context.start = 0; /* thread 0 already running */
#endif
}
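
/*---------------------------------------------------------------------------
 * Return the peak stack usage of a thread as a percentage of its stack
 * size, found by scanning for the first overwritten DEADBEEF fill word.
 *---------------------------------------------------------------------------
 */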
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}
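
/*---------------------------------------------------------------------------
 * Return the scheduler state (running, sleeping or blocked) of a thread.
 *---------------------------------------------------------------------------
 */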
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}