firmware/thread.c
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif

#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */

struct core_entry cores[NUM_CORES] IBSS_ATTR;
#ifdef HAVE_PRIORITY_SCHEDULING
static unsigned short highest_priority IBSS_ATTR;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static bool cpu_boosted IBSS_ATTR;
#endif

/* Define to enable additional checks for blocking violations etc. */
// #define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];
#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the
   threading is. No threads are run on the coprocessor, so set up a dummy
   stack for it. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif
/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;

static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr) __attribute__ ((always_inline));
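
/* How a thread first gets started (sketch, inferred from the per-CPU
 * loaders below): struct regs keeps a `start` field holding the entry
 * function stored by create_thread(). The first time load_context() runs
 * for a thread it finds `start` non-NULL (on TCC730: `started` == 0),
 * clears that flag and jumps to the entry function; on every later switch
 * it simply restores the saved registers and returns through the saved
 * return address. */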
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "mov    r1, #0                 \n"
        "cmp    r0, r1                 \n" /* check for NULL */
        "strne  r1, [%0, #40]          \n" /* if not NULL, clear the start pointer */
        "movne  pc, r0                 \n" /* and jump to the start address */
        : : "r" (addr) : "r0", "r1"
    );
}

#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   .running                      \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
        ".running:                             \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0   \n"
        "sts.l   pr, @-%0 \n"
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l   @%0+,r8  \n"
        "mov.l   @%0+,r9  \n"
        "mov.l   @%0+,r10 \n"
        "mov.l   @%0+,r11 \n"
        "mov.l   @%0+,r12 \n"
        "mov.l   @%0+,r13 \n"
        "mov.l   @%0+,r14 \n"
        "mov.l   @%0+,r15 \n"
        "lds.l   @%0+,pr  \n"
        "mov.l   @%0,r0   \n" /* Get start address */
        "tst     r0,r0    \n"
        "bt      .running \n" /* NULL -> already running */
        "lds     r0,pr    \n"
        "mov     #0,r0    \n"
        "rts              \n" /* Start the thread */
        "mov.l   r0,@%0   \n" /* Clear start address */
        ".running:        \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

#elif CONFIG_CPU == TCC730
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
#define store_context(addr)                      \
    __asm__ volatile (                           \
        "push r0,r1\n\t"                         \
        "push r2,r3\n\t"                         \
        "push r4,r5\n\t"                         \
        "push r6,r7\n\t"                         \
        "push a8,a9\n\t"                         \
        "push a10,a11\n\t"                       \
        "push a12,a13\n\t"                       \
        "push a14\n\t"                           \
        "ldw @[%0+0], a15\n\t" : : "a" (addr) );

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
#define load_context(addr)                       \
    {                                            \
        if (!(addr)->started) {                  \
            (addr)->started = 1;                 \
            __asm__ volatile (                   \
                "ldw a15, @[%0+0]\n\t"           \
                "ldw a14, @[%0+4]\n\t"           \
                "jmp a14\n\t" : : "a" (addr)     \
            );                                   \
        } else                                   \
            __asm__ volatile (                   \
                "ldw a15, @[%0+0]\n\t"           \
                "pop a14\n\t"                    \
                "pop a13,a12\n\t"                \
                "pop a11,a10\n\t"                \
                "pop a9,a8\n\t"                  \
                "pop r7,r6\n\t"                  \
                "pop r5,r4\n\t"                  \
                "pop r3,r2\n\t"                  \
                "pop r1,r0\n\t" : : "a" (addr)   \
            );                                   \
    }

#endif

static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
        */
    }
}

static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
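
/* Both helpers above treat the thread lists as circular, doubly linked
 * rings: `*list` points at the head entry, each entry's next/prev pointers
 * wrap around, and an empty list is simply a NULL head pointer.
 * remove_from_list() may be called with list == NULL when the caller only
 * wants the entry unlinked from its ring without touching a head pointer. */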

/* Compiler trick: don't declare as static, to prevent the function from
 * being placed in IRAM. */
void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    if (current == NULL)
        return ;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}

static inline void sleep_core(void)
{
    static long last_tick = 0;

    for (;;)
    {
        if (last_tick != current_tick)
        {
            check_sleepers();
            last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
        if (cpu_boosted)
        {
            cpu_boost(false);
            cpu_boosted = false;
        }
#endif

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
#ifdef CPU_COLDFIRE
        asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
        and_b(0x7F, &SBYCR);
        asm volatile ("sleep");
#elif CONFIG_CPU == PP5020
        /* This should sleep the CPU. It appears to wake by itself on
           interrupts */
        CPU_CTL = 0x80000000;
#elif CONFIG_CPU == TCC730
        /* Sleep mode is triggered by the SYS instr on CalmRisc16.
         * Unfortunately, the manual doesn't specify which arg to use.
         __asm__ volatile ("sys #0x0f");
         0x1f seems to trigger a reset;
         0x0f is the only other argument used by Archos.
         */
#elif CONFIG_CPU == S3C2440
        CLKCON |= 2;
#endif
    }
}

#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&cores[CURRENT_CORE].threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif

/* Compiler trick: don't declare as static, to prevent the function from
 * being placed in IRAM. */
void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    remove_from_list(&cores[CURRENT_CORE].running, old);

    /* And put the thread into a new list of inactive threads. */
    if (GET_STATE(old->statearg) == STATE_BLOCKED)
        add_to_list(blocked_list, old);
    else
        add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Reset priorities */
    if (old->priority == highest_priority)
        highest_priority = 100;
#endif
}
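
/* Note on statearg (layout inferred from the 0xC0000000 mask used in
 * wakeup_thread() and from the GET_STATE/GET_STATE_ARG/SET_STATE macros in
 * thread.h): the thread state is packed into the top bits and a state
 * argument, such as the wakeup tick of a sleeping or timeout-blocked
 * thread, into the remaining bits. That is why dropping a timeout only
 * requires masking statearg down to its state bits. */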

/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

# if CONFIG_CPU != TCC730
        /* Check if the current thread stack is overflown */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
            panicf("Stkov %s", cores[CURRENT_CORE].running->name);
# endif

        /* Check if a thread state change has been requested. */
        if (cores[CURRENT_CORE].running->statearg)
        {
            /* Change running thread state and switch to next thread. */
            change_thread_state(blocked_list);
        }
        else
        {
            /* Switch to the next running thread. */
            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < highest_priority)
            highest_priority = priority;

        if (priority == highest_priority || (current_tick
            - cores[CURRENT_CORE].running->last_run > priority * 8))
        {
            break;
        }

        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
#endif

#endif
    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
#endif
}

void sleep_thread(int ticks)
{
    /* Set the thread's new state and timeout, then force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it on the list of sleeping tasks. */
    cores[CURRENT_CORE].running->statearg =
        SET_STATE(STATE_SLEEPING, current_tick + ticks + 1);
    switch_thread(true, NULL);

    /* Clear all flags to indicate we are up and running again. */
    cores[CURRENT_CORE].running->statearg = 0;
}
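
/* Usage sketch (hypothetical caller): a thread that wants to wait roughly
 * half a second can simply call
 *
 *     sleep_thread(HZ/2);
 *
 * The extra +1 tick added above guarantees at least the requested delay
 * even when the call happens just before a tick boundary. */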

void block_thread(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    /* At the next task switch the scheduler will immediately change the
     * thread state (and we also force the task switch to happen). */
    if (timeout)
    {
#ifdef THREAD_EXTRA_CHECKS
        /* Only one thread can be stored in "list", because a timeout-blocked
         * thread is also kept on another list (the core's list of sleeping
         * tasks). */
        if (*list)
            panicf("Blocking violation T->*B");
#endif

        current->statearg =
            SET_STATE(STATE_BLOCKED_W_TMO, current_tick + timeout);
        *list = current;

        /* Now force a task switch and block until we have been woken up
         * by another thread or the timeout is reached. */
        switch_thread(true, NULL);

        /* If the timeout was reached, we must set the list back to NULL here. */
        *list = NULL;
    }
    else
    {
#ifdef THREAD_EXTRA_CHECKS
        /* We are not allowed to mix blocking types in one queue. */
        if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
            panicf("Blocking violation B->*T");
#endif

        current->statearg = SET_STATE(STATE_BLOCKED, 0);

        /* Now force a task switch and block until we have been woken up
         * by another thread. */
        switch_thread(true, list);
    }

    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
}

void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
        return ;

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. */
            remove_from_list(list, thread);
            add_to_list(&cores[CURRENT_CORE].running, thread);
            thread->statearg = 0;
            break;

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause the scheduler to immediately
             * wake up the thread. */
            thread->statearg &= 0xC0000000;
            *list = NULL;
            break;

        default:
            /* Nothing to do. Thread has already been woken up, or its
             * state is neither blocked nor blocked with timeout. */
            return ;
    }
}
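
/* Pairing sketch (hypothetical code, names invented for illustration): a
 * waiter and a waker share a single list head,
 *
 *     static struct thread_entry *waiting = NULL;
 *
 *     // waiter: block until signalled, or give up after one second
 *     block_thread(&waiting, HZ);
 *
 *     // waker, running in another thread:
 *     wakeup_thread(&waiting);
 *
 * With a timeout only one thread may wait on the list; without a timeout
 * several may queue up, which is what the THREAD_EXTRA_CHECKS tests in
 * block_thread() enforce. */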

/*---------------------------------------------------------------------------
 * Create a thread on the current core.
 * Return the thread entry if a slot and context area could be allocated,
 * else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority))
{
    return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
                                 name IF_PRIO(, priority));
}

/*---------------------------------------------------------------------------
 * Create a thread on a specific core.
 * Return the thread entry if a slot and context area could be allocated,
 * else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread_on_core(unsigned int core, void (*function)(void),
                          void* stack, int stack_size,
                          const char *name IF_PRIO(, int priority))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int n;
    struct regs *regs;
    struct thread_entry *thread;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (cores[core].threads[n].name == NULL)
            break;
    }

    if (n == MAXTHREADS)
        return NULL;

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &cores[core].threads[n];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority = priority;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, thread);

    regs = &thread->context;
#if defined(CPU_COLDFIRE) || (CONFIG_CPU == SH7034) || defined(CPU_ARM)
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
#elif CONFIG_CPU == TCC730
    /* Align stack on word boundary */
    regs->sp = (void*)(((unsigned long)stack + stack_size - 2) & ~1);
    regs->started = 0;
#endif
    regs->start = (void*)function;

    return thread;
}
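
/* Creation sketch (hypothetical thread, names invented for illustration):
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE/sizeof(long)];
 *
 *     static void demo_thread(void)
 *     {
 *         for (;;)
 *             sleep_thread(HZ);
 *     }
 *
 *     create_thread(demo_thread, demo_stack, sizeof(demo_stack),
 *                   "demo" IF_PRIO(, PRIORITY_BACKGROUND));
 *
 * The stack must stay valid for the thread's lifetime; the whole area is
 * pre-filled with DEADBEEF so thread_stack_usage() can report a high-water
 * mark later. */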

#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    if (!cpu_boosted)
    {
        cpu_boost(true);
        cpu_boosted = true;
    }
}
#endif

/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * The parameter is the thread pointer as returned from create_thread(),
 * or NULL for the current thread.
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    highest_priority = 100;
#endif

    if (thread == cores[CURRENT_CORE].running)
    {
        remove_from_list(&cores[CURRENT_CORE].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[CURRENT_CORE].sleeping)
        remove_from_list(&cores[CURRENT_CORE].sleeping, thread);
    else
        remove_from_list(NULL, thread);
}

#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    highest_priority = 100;

    return old_priority;
}
#endif

void init_threads(void)
{
    unsigned int core = CURRENT_CORE;

    memset(cores, 0, sizeof cores);
    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].threads[0].name = main_thread_name;
    cores[core].threads[0].statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
    highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    cpu_boosted = false;
#endif
    add_to_list(&cores[core].running, &cores[core].threads[0]);

/* In multiple core setups, each core has a different stack. There is probably
   a much better way to do this. */
    if (core == CPU)
    {
        cores[CPU].threads[0].stack = stackbegin;
        cores[CPU].threads[0].stack_size = (int)stackend - (int)stackbegin;
    } else {
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        cores[COP].threads[0].stack = cop_stackbegin;
        cores[COP].threads[0].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
#endif
    }
#if CONFIG_CPU == TCC730
    cores[core].threads[0].context.started = 1;
#else
    cores[core].threads[0].context.start = 0; /* thread 0 already running */
#endif
}

int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}
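
/* Example of the calculation above (numbers invented): with a 1024 byte
 * stack (256 ints) of which the bottom 64 ints still hold DEADBEEF, the
 * loop stops at i == 64, so usage = ((1024 - 64*4) * 100) / 1024 = 75%. */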

int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}