/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "sprintf.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
#if NUM_CORES > 1
# define IF_COP2(x) x
#else
# define IF_COP2(x) CURRENT_CORE
#endif
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif
/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS 0

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];
/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
        ICODE_ATTR;
static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
        __attribute__ ((always_inline));
static inline void core_sleep(void) __attribute__((always_inline));
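
/* Processor-specific implementations of start_thread, store_context,
 * load_context and core_sleep follow, selected by CPU type. */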
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
static void start_thread(void) __attribute__((naked,used));
static void start_thread(void)
{
    /* r0 = context */
    asm volatile (
        "ldr    sp, [r0, #32]          \n" /* Load initial sp */
        "ldr    r4, [r0, #40]          \n" /* start in r4 since it's non-volatile */
        "mov    r1, #0                 \n" /* Mark thread as running */
        "str    r1, [r0, #40]          \n"
#if NUM_CORES > 1
        "ldr    r0, =invalidate_icache \n" /* Invalidate this core's cache. */
        "mov    lr, pc                 \n" /* This could be the first entry into */
        "bx     r0                     \n" /* plugin or codec code for this core. */
#endif
        "mov    lr, pc                 \n" /* Call thread function */
        "bx     r4                     \n"
        "mov    r0, #0                 \n" /* remove_thread(NULL) */
        "ldr    pc, =remove_thread     \n"
        ".ltorg                        \n" /* Dump constant pool */
    ); /* No clobber list - new thread doesn't care */
}
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
       (thread)->context.r[1] = (unsigned int)start_thread,       \
       (thread)->context.start = (void *)function; })
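
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */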
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
        "cmp     r0, #0                 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}
#if defined (CPU_PP)

#if NUM_CORES > 1
extern int cpu_idlestackbegin[];
extern int cpu_idlestackend[];
extern int cop_idlestackbegin[];
extern int cop_idlestackend[];
static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};
#endif /* NUM_CORES */

static inline void core_sleep(void)
{
    /* This should sleep the CPU. It appears to wake by itself on
       interrupts */
    if (CURRENT_CORE == CPU)
        CPU_CTL = PROC_SLEEP;
    else
        COP_CTL = PROC_SLEEP;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core in which case the core will continue to use a
 * stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == S3C2440
static inline void core_sleep(void)
{
    int i;
    CLKCON |= (1 << 2); /* set IDLE bit */
    for(i=0; i<10; i++); /* wait for IDLE */
    CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
}
#else
static inline void core_sleep(void)
{
}
#endif
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __start_thread(void) __attribute__((used));
static void __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
    "start_thread:             \n" /* Start here - no naked attribute */
        "move.l  %a0, %macsr   \n" /* Set initial mac status reg */
        "lea.l   48(%a1), %a1  \n"
        "move.l  (%a1)+, %sp   \n" /* Set initial stack */
        "move.l  (%a1), %a2    \n" /* Fetch thread function pointer */
        "clr.l   (%a1)         \n" /* Mark thread running */
        "jsr     (%a2)         \n" /* Call thread function */
        "clr.l   -(%sp)        \n" /* remove_thread(NULL) */
        "jsr     remove_thread \n"
    );
}
/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run.
 */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0]  = (unsigned int)&(thread)->context, \
       (thread)->context.d[1]  = (unsigned int)start_thread,       \
       (thread)->context.start = (void *)(function); })
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l  52(%0), %%d0                   \n" /* Get start address */
        "beq.b   1f                             \n" /* NULL -> already running */
        "movem.l (%0), %%a0-%%a2                \n" /* a0=macsr, a1=context, a2=start_thread */
        "jmp     (%%a2)                         \n" /* Start the thread */
    "1:                                         \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0, %%macsr                  \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
static inline void core_sleep(void)
{
    asm volatile ("stop #0x2000");
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
   since that's what'll be the most useful for most things which the dsp
   will do. Codecs should still initialize their preferred modes
   explicitly. */
#define THREAD_CPU_INIT(core, thread) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __start_thread(void) __attribute__((used));
static void __start_thread(void)
{
    /* r8 = context */
    asm volatile (
    "_start_thread:            \n" /* Start here - no naked attribute */
        "mov.l  @(4, r8), r0   \n" /* Fetch thread function pointer */
        "mov.l  @(28, r8), r15 \n" /* Set initial sp */
        "mov    #0, r1         \n" /* Start the thread */
        "jsr    @r0            \n"
        "mov.l  r1, @(36, r8)  \n" /* Clear start address */
        "mov.l  1f, r0         \n" /* remove_thread(NULL) */
        "jmp    @r0            \n"
        "mov    #0, r4         \n"
    "1:                        \n"
        ".long  _remove_thread \n"
    );
}
/* Place context pointer in r8 slot, function pointer in r9 slot, and
 * start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
       (thread)->context.r[1] = (unsigned int)(function),         \
       (thread)->context.start = (void*)start_thread; })
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36, %0   \n" /* Start at last reg. By the time routine */
        "sts.l   pr, @-%0  \n" /* is done, %0 will have the original value */
        "mov.l   r15,@-%0  \n"
        "mov.l   r14,@-%0  \n"
        "mov.l   r13,@-%0  \n"
        "mov.l   r12,@-%0  \n"
        "mov.l   r11,@-%0  \n"
        "mov.l   r10,@-%0  \n"
        "mov.l   r9, @-%0  \n"
        "mov.l   r8, @-%0  \n"
        : : "r" (addr)
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @(36, %0), r0 \n" /* Get start address */
        "tst    r0, r0        \n"
        "bt     .running      \n" /* NULL -> already running */
        "jmp    @r0           \n" /* r8 = context */
    ".running:                \n"
        "mov.l  @%0+, r8      \n" /* Executes in delay slot and outside it */
        "mov.l  @%0+, r9      \n"
        "mov.l  @%0+, r10     \n"
        "mov.l  @%0+, r11     \n"
        "mov.l  @%0+, r12     \n"
        "mov.l  @%0+, r13     \n"
        "mov.l  @%0+, r14     \n"
        "mov.l  @%0+, r15     \n"
        "lds.l  @%0+, pr      \n"
        : : "r" (addr) : "r0" /* only! */
    );
}
static inline void core_sleep(void)
{
    and_b(0x7F, &SBYCR);
    asm volatile ("sleep");
}

#endif
#ifndef THREAD_CPU_INIT
/* No cpu specific init - make empty */
#define THREAD_CPU_INIT(core, thread)
#endif
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
#if NUM_CORES > 1
    const unsigned int core = thread->core;
#endif
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}
static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}
#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
#if NUM_CORES > 1
    const unsigned int core = thread->core;
#endif
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}
#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
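
/* Thread lists are circular doubly-linked lists identified by a pointer to
 * one of their members (NULL when the list is empty). add_to_list() appends
 * the thread at the tail of the list. */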
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
         thread->next = (*list)->next;
         thread->prev = *list;
         thread->next->prev = thread;
         (*list)->next = thread;
         */
    }
}
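
/* Unlink a thread from a list. Passing NULL for "list" skips the head
 * pointer fixup and only unlinks the thread from its neighbours. */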
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
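
/* Wake any sleeping threads whose timeout has expired by moving them back
 * onto this core's running list. */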
static void check_sleepers(void) __attribute__ ((noinline));
static void check_sleepers(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[core].sleeping;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[core].sleeping, current);
            add_to_list(&cores[core].running, current);
            current->statearg = 0;

            /* If there are no more processes in the list, break the loop. */
            if (cores[core].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[core].sleeping)
            break;
    }
}
/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void) __attribute__ ((noinline));
static void wake_list_awaken(void)
{
    const unsigned int core = CURRENT_CORE;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */

    struct thread_entry *waking = cores[core].waking;
    struct thread_entry *running = cores[core].running;

    if (running != NULL)
    {
        /* Place waking threads at the end of the running list. */
        struct thread_entry *tmp;
        waking->prev->next = running;
        running->prev->next = waking;
        tmp = running->prev;
        running->prev = waking->prev;
        waking->prev = tmp;
    }
    else
    {
        /* Just transfer the list as-is - just came out of a core
         * sleep. */
        cores[core].running = waking;
    }

    /* Done with waking list */
    cores[core].waking = NULL;
    set_irq_level(oldlevel);
}
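
/* Process pending wakeups and expired sleep timeouts, then put the core to
 * sleep until at least one thread is runnable again. */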
static inline void sleep_core(void)
{
    const unsigned int core = CURRENT_CORE;

    for (;;)
    {
        /* Handle these ASAP: they may change the decision to sleep the
           core, or the core may have woken because an interrupt occurred
           and posted a message to a queue. */
        if (cores[core].waking != NULL)
            wake_list_awaken();

        if (cores[core].last_tick != current_tick)
        {
            if (cores[core].sleeping != NULL)
                check_sleepers();
            cores[core].last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[core].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
        core_sleep();
    }
}
#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif
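
/* Carry out any state change requested for the running thread: move it onto
 * the given blocked list or the core's sleeping list, or simply advance to
 * the next thread in the running list if no change was requested. */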
static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline));
static void change_thread_state(struct thread_entry **blocked_list)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *old;
    unsigned long new_state;

    /* Remove the thread from the list of running threads. */
    old = cores[core].running;
    new_state = GET_STATE(old->statearg);

    /* Check if a thread state change has been requested. */
    if (new_state)
    {
        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[core].running, old);

        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
        else
            add_to_list(&cores[core].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == cores[core].highest_priority)
            cores[core].highest_priority = 100;
#endif
    }
    else
        /* Switch to the next running thread. */
        cores[core].running = old->next;
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
    const unsigned int core = CURRENT_CORE;

#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[core].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[core].running->context);

        /* Check if the current thread stack is overflown */
        stackptr = cores[core].running->stack;
        if(stackptr[0] != DEADBEEF)
            thread_stkov(cores[core].running);

        /* Rearrange thread lists as needed */
        change_thread_state(blocked_list);

        /* This has to be done after the scheduler is finished with the
           blocked_list pointer so that an IRQ can't kill us by attempting
           a wake but before attempting any core sleep. */
        if (cores[core].switch_to_irq_level != STAY_IRQ_LEVEL)
        {
            int level = cores[core].switch_to_irq_level;
            cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
            set_irq_level(level);
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[core].running->priority;

        if (priority < cores[core].highest_priority)
            cores[core].highest_priority = priority;

        if (priority == cores[core].highest_priority ||
            (current_tick - cores[core].running->last_run >
             priority * 8) ||
            cores[core].running->priority_x != 0)
        {
            break;
        }

        cores[core].running = cores[core].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[core].running->last_run = current_tick;
#endif

#endif

    /* And finally give control to the next thread. */
    load_context(&cores[core].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[core].running));
#endif
}
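
/* Put the current thread to sleep for at least "ticks" kernel ticks. */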
void sleep_thread(int ticks)
{
    struct thread_entry *current;

    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);

    switch_thread(true, NULL);
}
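
/* Block the current thread on "list" until another thread explicitly wakes
 * it with wakeup_thread(). */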
void block_thread(struct thread_entry **list)
{
    struct thread_entry *current;

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#endif

    /* We are not allowed to mix blocking types in one queue. */
    THREAD_ASSERT(*list != NULL && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO,
                  "Blocking violation B->*T", current);

    /* Set the state to blocked and ask the scheduler to switch tasks,
     * this takes us off of the run queue until we are explicitly woken */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);

    switch_thread(true, list);

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
#else
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
#endif
}
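
/* Block the current thread on "list" until it is explicitly woken or the
 * timeout (in ticks) expires. */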
void block_thread_w_tmo(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation, whatever we are waiting
     * for _may or may not_ happen, regardless of boost state, (user input
     * for instance), so this thread no longer needs to boost */
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

    /* Only one thread can be stored in the "list", since the thread is also
     * kept on another list (such as the core's list of sleeping tasks). */
    THREAD_ASSERT(*list == NULL, "Blocking violation T->*B", current);

    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);

    /* Set the "list" for explicit wakeup */
    *list = current;

    /* Now force a task switch and block until we have been woken up
     * by another thread or timeout is reached. */
    switch_thread(true, NULL);

    /* It is now safe for another thread to block on this "list" */
    *list = NULL;
}
#if !defined(SIMULATOR)
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread(list);
}

void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
}
#endif
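
/* Wake the thread blocked on "list": a STATE_BLOCKED thread is moved onto
 * the core's wakeup list; for STATE_BLOCKED_W_TMO only the timeout is
 * cleared so the scheduler wakes the thread on its next pass. */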
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
    {
        return ;
    }

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. List removal
             * is safe since each object maintains its own list of
             * sleepers and queues protect against reentrancy. */
            remove_from_list(list, thread);
            add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause scheduler to immediately
             * wake up the thread. */
            thread->statearg = 0;
            break;

        default:
            /* Nothing to do. Thread has already been woken up
             * or its state is not blocked or blocked with timeout. */
            return ;
    }
}
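
/* Find a free slot in the global thread array (a slot is free when its name
 * pointer is NULL); returns -1 if all slots are in use. */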
inline static int find_empty_thread_slot(void)
{
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (threads[n].name == NULL)
            return n;
    }

    return -1;
}
/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
   before calling. */
void wakeup_thread_irq_safe(struct thread_entry **list)
{
    struct core_entry *core = &cores[CURRENT_CORE];
    /* Switch wakeup lists and call wakeup_thread */
    core->wakeup_list = &core->waking;
    wakeup_thread(list);
    /* Switch back to normal running list */
    core->wakeup_list = &core->running;
}
/*---------------------------------------------------------------------------
 * Create a thread
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
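/* Typical call site (illustrative sketch only - the thread function, stack
 * array and name below are hypothetical, not defined in this file):
 *
 *     static long example_stack[DEFAULT_STACK_SIZE/sizeof(long)];
 *     static void example_thread(void);
 *
 *     create_thread(example_thread, example_stack, sizeof(example_stack),
 *                   "example" IF_PRIO(, PRIORITY_BACKGROUND)
 *                   IF_COP(, CPU, false));
 */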
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority)
                  IF_COP(, unsigned int core, bool fallback))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int slot;
    struct thread_entry *thread;

/*****
 * Ugly code alert!
 * To prevent ifdef hell while keeping the binary size down, we define
 * core here if it hasn't been passed as a parameter
 *****/
#if NUM_CORES == 1
#define core CPU
#endif

#if NUM_CORES > 1
    /* If the kernel hasn't initialised on the COP (most likely due to an old
     * bootloader) then refuse to start threads on the COP */
    if ((core == COP) && !cores[core].kernel_running)
    {
        if (fallback)
            return create_thread(function, stack, stack_size, name
                                 IF_PRIO(, priority) IF_COP(, CPU, false));
        else
            return NULL;
    }
#endif

    slot = find_empty_thread_slot();
    if (slot < 0)
    {
        return NULL;
    }

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &threads[slot];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = 0;
    thread->priority = priority;
    cores[core].highest_priority = 100;
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        flush_icache();
    }
#endif

    /* Align stack to an even 32 bit boundary */
    thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    add_to_list(&cores[core].running, thread);

    return thread;
#if NUM_CORES == 1
#undef core
#endif
}
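
/* Boost the CPU on behalf of the current thread if it is not already marked
 * boosted. The boost is released again when the thread sleeps or blocks
 * with a timeout. */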
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
    {
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
        {
            cpu_boost(true);
        }
        boosted_threads++;
    }
}
#endif
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    const unsigned int core = CURRENT_CORE;

    if (thread == NULL)
        thread = cores[core].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[IF_COP2(thread->core)].highest_priority = 100;
#endif

    if (thread == cores[IF_COP2(thread->core)].running)
    {
        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
#if NUM_CORES > 1
        /* Switch to the idle stack if not on the main core (where "main"
         * runs) */
        if (core != CPU)
        {
            switch_to_idle_stack(core);
        }

        flush_icache();
#endif
        switch_thread(false, NULL);
        /* This should never and must never be reached - if it is, the
         * state is corrupted */
        THREAD_PANICF("remove_thread->K:*R", thread);
    }

    if (thread == cores[IF_COP2(thread->core)].sleeping)
        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
    else
        remove_from_list(NULL, thread);
}
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    cores[IF_COP2(thread->core)].highest_priority = 100;

    return old_priority;
}

int thread_get_priority(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->priority;
}

void priority_yield(void)
{
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;
    switch_thread(true, NULL);
    thread->priority_x = 0;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
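
/* Return the thread entry of the thread currently running on this core. */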
struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    int slot;

    /* CPU will initialize first and then sleep */
    slot = find_empty_thread_slot();
#if THREAD_EXTRA_CHECKS
    /* This can fail if, for example, .bss isn't zero'ed out by the loader
       or threads is in the wrong section. */
    if (slot < 0) {
        panicf("uninitialized threads[]");
    }
#endif

    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].waking = NULL;
    cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif
    threads[slot].name = main_thread_name;
    threads[slot].statearg = 0;
    threads[slot].context.start = 0; /* core's main thread already running */
#if NUM_CORES > 1
    threads[slot].core = core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    threads[slot].priority = PRIORITY_USER_INTERFACE;
    threads[slot].priority_x = 0;
    cores[core].highest_priority = 100;
#endif
    add_to_list(&cores[core].running, &threads[slot]);

    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
    if (core == CPU)
    {
#ifdef HAVE_SCHEDULER_BOOSTCTRL
        boosted_threads = 0;
#endif
        threads[slot].stack = stackbegin;
        threads[slot].stack_size = (int)stackend - (int)stackbegin;
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
        /* Mark CPU initialized */
        cores[CPU].kernel_running = true;
        /* Do _not_ wait for the COP to init in the bootloader because it doesn't */
        /* TODO: HAL interface for this */
        /* Wake up coprocessor and let it initialize kernel and threads */
        COP_CTL = PROC_WAKE;
        /* Sleep until finished */
        CPU_CTL = PROC_SLEEP;
    }
    else
    {
        /* Initial stack is the COP idle stack */
        threads[slot].stack = cop_idlestackbegin;
        threads[slot].stack_size = IDLE_STACK_SIZE;
        /* Mark COP initialized */
        cores[COP].kernel_running = true;
        /* Get COP safely primed inside switch_thread where it will remain
         * until a thread actually exists on it */
        CPU_CTL = PROC_WAKE;
        remove_thread(NULL);
#endif /* NUM_CORES */
    }
}
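
/* Returns the percentage of the thread's stack that has ever been used,
 * judged by how much of the DEADBEEF fill pattern has been overwritten. */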
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
        thread->stack_size;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    unsigned int *stackptr = idle_stacks[core];
    int i, usage = 0;

    for (i = 0; i < IDLE_STACK_WORDS; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
            break;
        }
    }

    return usage;
}
#endif
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";
        if (name == NULL || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX";
        }
        snprintf(buffer, size, fmt, name);
    }
}