[Rockbox.git] / firmware / thread.c
blob 6a583a470a62e5798f9f944040a8ca5af8d1087e
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/  \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
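/* The DEADBEEF pattern is written over every thread stack when the thread is
 * created (see create_thread_on_core() below); switch_thread() panics if the
 * word at the very bottom of the stack has been overwritten, and
 * thread_stack_usage() reports how much of the pattern is still intact. */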
struct core_entry cores[NUM_CORES] IBSS_ATTR;

#ifdef HAVE_PRIORITY_SCHEDULING
static unsigned short highest_priority IBSS_ATTR;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
#define STAY_IRQ_LEVEL -1
static int switch_to_irq_level = STAY_IRQ_LEVEL;
#endif
/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];
#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the
 * threading is. No threads are run on the coprocessor, so set up some
 * dummy stack for it. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif
/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;

static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
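/* Summary of the per-architecture primitives below: store_context() saves the
 * non-volatile registers of the current thread into its struct regs, and
 * load_context() restores them. The struct also carries a "start" pointer
 * (set by create_thread_on_core(), cleared for the main thread in
 * init_threads()): while it is non-NULL the thread has never run, so
 * load_context() clears it and jumps to the thread's function instead of
 * returning into a saved context. */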
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "mov    r1, #0                 \n"
        "cmp    r0, r1                 \n" /* check for NULL */
        "strne  r1, [%0, #40]          \n" /* if not NULL, clear start pointer.. */
        "movne  pc, r0                 \n" /* ..and jump to the start function;
                                              otherwise already running */
        : : "r" (addr) : "r0", "r1"
    );
}
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   .running                      \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    ".running:                                 \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0   \n"
        "sts.l   pr, @-%0 \n"
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l   @%0+,r8  \n"
        "mov.l   @%0+,r9  \n"
        "mov.l   @%0+,r10 \n"
        "mov.l   @%0+,r11 \n"
        "mov.l   @%0+,r12 \n"
        "mov.l   @%0+,r13 \n"
        "mov.l   @%0+,r14 \n"
        "mov.l   @%0+,r15 \n"
        "lds.l   @%0+,pr  \n"
        "mov.l   @%0,r0   \n" /* Get start address */
        "tst     r0,r0    \n"
        "bt      .running \n" /* NULL -> already running */
        "lds     r0,pr    \n"
        "mov     #0,r0    \n"
        "rts              \n" /* Start the thread */
        "mov.l   r0,@%0   \n" /* Clear start address */
    ".running:            \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

#endif
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
         thread->next = (*list)->next;
         thread->prev = *list;
         thread->next->prev = thread;
         (*list)->next = thread;
         */
    }
}
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
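/* add_to_list()/remove_from_list() maintain a circular, doubly linked list:
 * *list points at the head, head->prev is the tail, and a one-element list
 * points at itself. New threads are appended at the tail (just before the
 * head), which is what gives switch_thread() its round-robin order. The same
 * helpers serve the per-core running and sleeping lists as well as the
 * caller-supplied blocked lists. */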
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    if (current == NULL)
        return ;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);
            current->statearg = 0;

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}
static inline void sleep_core(void)
{
    static long last_tick = 0;
#if CONFIG_CPU == S3C2440
    int i;
#endif

    for (;;)
    {
        if (last_tick != current_tick)
        {
            check_sleepers();
            last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
#ifdef CPU_COLDFIRE
        asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
        and_b(0x7F, &SBYCR);
        asm volatile ("sleep");
#elif CONFIG_CPU == PP5020
        /* This should sleep the CPU. It appears to wake by itself on
           interrupts */
        CPU_CTL = 0x80000000;
#elif CONFIG_CPU == S3C2440
        CLKCON |= (1 << 2); /* set IDLE bit */
        for(i=0; i<10; i++); /* wait for IDLE */
        CLKCON &= ~(1 << 2); /* reset IDLE bit when waking up */
#endif
    }
}
#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&cores[CURRENT_CORE].threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;
    unsigned long new_state;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    new_state = GET_STATE(old->statearg);

    /* Check if a thread state change has been requested. */
    if (new_state)
    {
        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[CURRENT_CORE].running, old);

        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
        else
            add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == highest_priority)
            highest_priority = 100;
#endif
    }
    else
        /* Switch to the next running thread. */
        cores[CURRENT_CORE].running = old->next;
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

        /* Check whether the current thread's stack has overflowed */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
            panicf("Stkov %s", cores[CURRENT_CORE].running->name);

        /* Rearrange thread lists as needed */
        change_thread_state(blocked_list);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        /* This has to be done after the scheduler is finished with the
           blocked_list pointer so that an IRQ can't kill us by attempting
           a wake but before attempting any core sleep. */
        if (switch_to_irq_level != STAY_IRQ_LEVEL)
        {
            int level = switch_to_irq_level;
            switch_to_irq_level = STAY_IRQ_LEVEL;
            set_irq_level(level);
        }
#endif
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < highest_priority)
            highest_priority = priority;

        if (priority == highest_priority ||
            (current_tick - cores[CURRENT_CORE].running->last_run >
             priority * 8))
            break;

        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of the thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
#endif

#endif

    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
#endif
}
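/* Note on the priority selection above: a lower number means a higher
 * priority, and highest_priority tracks the lowest number seen (100 acts as
 * "unknown"). A lower-priority thread still becomes eligible to run once more
 * than priority * 8 ticks have passed since it last ran; for example, a
 * thread with priority 20 is picked again at the latest after 160 ticks even
 * while higher-priority threads remain runnable. */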
void sleep_thread(int ticks)
{
    struct thread_entry *current;

    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    if (STATE_IS_BOOSTED(current->statearg)) {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);
    switch_thread(true, NULL);
}
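/* The "+ 1" above makes the stored wakeup tick strictly later than
 * current_tick + ticks; since check_sleepers() wakes a thread only once
 * current_tick has reached that value, the thread sleeps for at least the
 * requested number of full ticks. */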
void block_thread(struct thread_entry **list)
{
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* We are not allowed to mix blocking types in one queue. */
    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
        panicf("Blocking violation B->*T");
#endif

    /* Set the state to blocked and ask the scheduler to switch tasks;
     * this takes us off the run queue until we are explicitly woken. */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);

    switch_thread(true, list);

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
#else
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
#endif
}
void block_thread_w_tmo(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation: whatever we are waiting
     * for _may or may not_ happen, regardless of boost state (user input,
     * for instance), so this thread no longer needs to boost. */
    if (STATE_IS_BOOSTED(current->statearg)) {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* Only one thread can be stored in "list", because the thread is also
     * kept in another list (such as the core's list of sleeping tasks). */
    if (*list)
        panicf("Blocking violation T->*B");
#endif

    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);

    /* Set the "list" for explicit wakeup */
    *list = current;

    /* Now force a task switch and block until we have been woken up
     * by another thread or the timeout is reached. */
    switch_thread(true, NULL);

    /* It is now safe for another thread to block on this "list" */
    *list = NULL;
}
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) && !defined(SIMULATOR)
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
    switch_to_irq_level = level;
    block_thread(list);
}

#if 0
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
{
    switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
}
#endif
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
        return ;

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. */
            remove_from_list(list, thread);
            add_to_list(&cores[CURRENT_CORE].running, thread);
            /* Fall through: the state argument is cleared below as well. */

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause the scheduler to immediately
             * wake up the thread. */
            thread->statearg = 0;
            break;

        default:
            /* Nothing to do. The thread has already been woken up
             * or its state is neither blocked nor blocked with timeout. */
            return ;
    }
}
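/* Illustrative pairing of block_thread() and wakeup_thread() on a shared list
 * pointer (a sketch only; "waiters", consumer() and producer() are
 * hypothetical names, not part of this file):
 *
 *     static struct thread_entry *waiters = NULL;
 *
 *     void consumer(void) { block_thread(&waiters);  }   <- waits here
 *     void producer(void) { wakeup_thread(&waiters); }   <- releases it
 *
 * A given list pointer must stick to one blocking type: mixing block_thread()
 * and block_thread_w_tmo() waiters on the same list trips the
 * THREAD_EXTRA_CHECKS panics above. */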
/*---------------------------------------------------------------------------
 * Create a thread on the current core.
 * Returns a pointer to the thread's entry if it could be set up, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority))
{
    return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
                                 name IF_PRIO(, priority));
}
/*---------------------------------------------------------------------------
 * Create a thread on a specific core.
 * Returns a pointer to the thread's entry if it could be set up, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread_on_core(unsigned int core, void (*function)(void),
                          void* stack, int stack_size,
                          const char *name IF_PRIO(, int priority))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int n;
    struct regs *regs;
    struct thread_entry *thread;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (cores[core].threads[n].name == NULL)
            break;
    }

    if (n == MAXTHREADS)
        return NULL;

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &cores[core].threads[n];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority = priority;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, thread);

    regs = &thread->context;
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
    regs->start = (void*)function;

    return thread;
}
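/* Illustrative use of create_thread() (a sketch only; the stack array, the
 * thread function and the priority shown are hypothetical, and
 * DEFAULT_STACK_SIZE / PRIORITY_BACKGROUND are assumed to come from the
 * target's config headers and thread.h):
 *
 *     static long example_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *     static void example_thread(void)
 *     {
 *         for (;;)
 *             sleep_thread(HZ);    <- give up the CPU for roughly a second
 *     }
 *
 *     create_thread(example_thread, example_stack, sizeof(example_stack),
 *                   "example" IF_PRIO(, PRIORITY_BACKGROUND));
 *
 * The caller owns the stack; this file only paints it with DEADBEEF and
 * records the pointer and size in the thread entry. */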
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
    {
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
        {
            cpu_boost(true);
        }
        boosted_threads++;
    }
}
#endif
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * The parameter is the pointer returned from create_thread(), or NULL to
 * remove the current thread.
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing the thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    highest_priority = 100;
#endif

    if (thread == cores[CURRENT_CORE].running)
    {
        remove_from_list(&cores[CURRENT_CORE].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[CURRENT_CORE].sleeping)
        remove_from_list(&cores[CURRENT_CORE].sleeping, thread);
    else
        remove_from_list(NULL, thread);
}
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    highest_priority = 100;

    return old_priority;
}

int thread_get_priority(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->priority;
}
#endif
void init_threads(void)
{
    unsigned int core = CURRENT_CORE;

    memset(cores, 0, sizeof cores);
    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].threads[0].name = main_thread_name;
    cores[core].threads[0].statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
    highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    boosted_threads = 0;
#endif
    add_to_list(&cores[core].running, &cores[core].threads[0]);

    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
    if (core == CPU)
    {
        cores[CPU].threads[0].stack = stackbegin;
        cores[CPU].threads[0].stack_size = (int)stackend - (int)stackbegin;
    } else {
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        cores[COP].threads[0].stack = cop_stackbegin;
        cores[COP].threads[0].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
#endif
    }
    cores[core].threads[0].context.start = 0; /* thread 0 already running */
}
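/* Note: init_threads() only registers the already-running "main" thread for
 * the calling core (context.start is left at 0, marking it as started); it
 * does not create or start anything else, and it is expected to run before
 * any create_thread() or switch_thread() call on that core. */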
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}
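/* The loop above counts the DEADBEEF words still untouched at the bottom of
 * the stack, so the return value is the peak usage as a percentage of the
 * whole stack. For example, an 8192 byte stack whose lowest 6144 bytes still
 * hold the fill pattern yields (8192 - 6144) * 100 / 8192 = 25. */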
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}