1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Ulf Ralberg
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
/* NOTE(review): this excerpt elides some original lines (e.g. the matching
 * #endif for HAVE_SCHEDULER_BOOSTCTRL lies outside the visible span);
 * code tokens below are preserved exactly as found. */

/* On single-core builds IF_COP2 collapses to the (only) current core. */
# define IF_COP2(x) CURRENT_CORE

/* Stack-fill pattern used to detect overflow and measure stack usage. */
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */

/* Per-core scheduler state: running/sleeping/waking lists etc. */
struct core_entry cores[NUM_CORES] IBSS_ATTR;
/* Static pool of thread slots; a slot is free when its name is NULL
 * (see find_empty_thread_slot). */
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/* Number of threads currently requesting a CPU boost. */
static int boosted_threads IBSS_ATTR;

/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS 0

static const char main_thread_name[] = "main";

/* Main stack bounds, provided by the linker script. */
extern int stackbegin[];
extern int stackend[];
/* Internal run/sleep list helpers, placed in IRAM (ICODE_ATTR) for speed. */
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;

/* NOTE(review): the switch_thread prototype below appears truncated in this
 * excerpt (its trailing attribute/semicolon line is elided). */
void switch_thread(bool save_context, struct thread_entry **blocked_list)

/* Context save/restore primitives; must inline so no extra frame is
 * created around the register save/load. */
static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
static inline void core_sleep(void) __attribute__((always_inline));
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 * (entered with r0 = &thread->context; see THREAD_STARTUP_INIT below)
 *---------------------------------------------------------------------------
 */
static void start_thread(void) __attribute__((naked,used));
static void start_thread(void)
/* NOTE(review): the opening brace, the "asm volatile (" wrapper and the
 * "bx r4" that enters the thread function are elided from this excerpt. */
    "ldr sp, [r0, #32] \n" /* Load initial sp */
    "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
    "mov r1, #0 \n" /* Mark thread as running */
    "str r1, [r0, #40] \n"
    "ldr r0, =invalidate_icache \n" /* Invalidate this core's cache. */
    "mov lr, pc \n" /* This could be the first entry into */
    "bx r0 \n" /* plugin or codec code for this core. */
    "mov lr, pc \n" /* Call thread function */
    "mov r0, #0 \n" /* remove_thread(NULL) */
    "ldr pc, =remove_thread \n"
    ".ltorg \n" /* Dump constant pool */
    ); /* No clobber list - new thread doesn't care */

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
/* NOTE(review): opening brace / asm wrapper / constraint tail elided. */
    "stmia %0, { r4-r11, sp, lr } \n"

/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
       (thread)->context.r[1] = (unsigned int)start_thread, \
       (thread)->context.start = (void *)function; })

static inline void load_context(const void* addr)
/* Restores r4-r11, sp, lr — or, for a never-run thread (non-NULL start
 * slot at offset 40), jumps into start_thread instead. */
    "ldr r0, [%0, #40] \n" /* Load start pointer */
    "cmp r0, #0 \n" /* Check for NULL */
    "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
    "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
    : : "r" (addr) : "r0" /* only! */
/* Idle-stack bounds for each core, provided by the linker script. */
extern int cpu_idlestackbegin[];
extern int cpu_idlestackend[];
extern int cop_idlestackbegin[];
extern int cop_idlestackend[];
/* Per-core idle stack bases; NOTE(review): the initializer's surrounding
 * braces are elided from this excerpt. */
static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
#endif /* NUM_CORES */

static inline void core_sleep(void)
/* This should sleep the CPU. It appears to wake by itself on
 * interrupts. NOTE(review): braces and the else branch are elided. */
    if (CURRENT_CORE == CPU)
        CPU_CTL = PROC_SLEEP;
        COP_CTL = PROC_SLEEP;

/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core in which case the core will continue to use a
 * stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
/* NOTE(review): asm volatile wrapper elided; saves sp at the top of the
 * idle stack, then makes sp point there. */
    "str sp, [%0] \n" /* save original stack pointer on idle stack */
    "mov sp, %0 \n" /* switch stacks */
    : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));

#endif /* NUM_CORES */
#elif CONFIG_CPU == S3C2440
static inline void core_sleep(void)
/* NOTE(review): braces and the declaration of loop variable i are elided
 * from this excerpt. */
    CLKCON |= (1 << 2); /* set IDLE bit */
    for(i=0; i<10; i++); /* wait for IDLE */
    CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */

/* Fallback for CPUs with no sleep support — empty body (elided here). */
static inline void core_sleep(void)
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __start_thread(void) __attribute__((used));
static void __start_thread(void)
/* a0=macsr, a1=context
 * NOTE(review): the asm volatile wrapper and braces are elided. */
    "start_thread: \n" /* Start here - no naked attribute */
    "move.l %a0, %macsr \n" /* Set initial mac status reg */
    "lea.l 48(%a1), %a1 \n"
    "move.l (%a1)+, %sp \n" /* Set initial stack */
    "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
    "clr.l (%a1) \n" /* Mark thread running */
    "jsr (%a2) \n" /* Call thread function */
    "clr.l -(%sp) \n" /* remove_thread(NULL) */
    "jsr remove_thread \n"

/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0] = (unsigned int)&(thread)->context, \
       (thread)->context.d[1] = (unsigned int)start_thread, \
       (thread)->context.start = (void *)(function); })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
/* Saves macsr plus d2-d7/a2-a7. NOTE(review): asm wrapper elided. */
    "move.l %%macsr,%%d0 \n"
    "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
    : : "a" (addr) : "d0" /* only! */

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
/* NOTE(review): asm wrapper and the "1:" label line are elided. */
    "move.l 52(%0), %%d0 \n" /* Get start address */
    "beq.b 1f \n" /* NULL -> already running */
    "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
    "jmp (%%a2) \n" /* Start the thread */
    "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
    "move.l %%d0, %%macsr \n"
    : : "a" (addr) : "d0" /* only! */

static inline void core_sleep(void)
    /* STOP enters low-power stopped state until the next interrupt. */
    asm volatile ("stop #0x2000");

/* Set EMAC unit to fractional mode with saturation for each new thread,
   since that's what'll be the most useful for most things which the dsp
   will do. Codecs should still initialize their preferred modes
   explicitly. */
#define THREAD_CPU_INIT(core, thread) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __start_thread(void) __attribute__((used));
static void __start_thread(void)
/* NOTE(review): the asm volatile wrapper and several instruction lines
 * (e.g. the jsr into the thread function) are elided from this excerpt. */
    "_start_thread: \n" /* Start here - no naked attribute */
    "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
    "mov.l @(28, r8), r15 \n" /* Set initial sp */
    "mov #0, r1 \n" /* Start the thread */
    "mov.l r1, @(36, r8) \n" /* Clear start address */
    "mov.l 1f, r0 \n" /* remove_thread(NULL) */
    ".long _remove_thread \n"

/* Place context pointer in r8 slot, function pointer in r9 slot, and
 * start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
       (thread)->context.r[1] = (unsigned int)(function), \
       (thread)->context.start = (void*)start_thread; })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
/* NOTE(review): asm wrapper and the remaining register stores are elided. */
    "add #36, %0 \n" /* Start at last reg. By the time routine */
    "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
/* NOTE(review): asm wrapper, tst instruction, ".running" label and
 * remaining register loads are elided from this excerpt. */
    "mov.l @(36, %0), r0 \n" /* Get start address */
    "bt .running \n" /* NULL -> already running */
    "jmp @r0 \n" /* r8 = context */
    "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
    : : "r" (addr) : "r0" /* only! */

static inline void core_sleep(void)
    /* SLEEP instruction halts the CPU until the next interrupt. */
    asm volatile ("sleep");

#ifndef THREAD_CPU_INIT
/* No cpu specific init - make empty */
#define THREAD_CPU_INIT(core, thread)
#if THREAD_EXTRA_CHECKS
/* Panic with the message, the offending thread's name and (on multi-core)
 * its core number. NOTE(review): braces and the #else line separating the
 * two thread_stkov variants are elided from this excerpt. */
static void thread_panicf(const char *msg, struct thread_entry *thread)
    const unsigned int core = thread->core;
    /* static buffer: only used on the panic path, never reentered */
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));

/* Stack-overflow handler used by switch_thread when extra checks are on. */
static void thread_stkov(struct thread_entry *thread)
    thread_panicf("Stkov", thread);

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })

/* Non-checking build: report stack overflow directly without the
 * thread_panicf helper. */
static void thread_stkov(struct thread_entry *thread)
    const unsigned int core = thread->core;
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));

/* Checks compile out entirely when THREAD_EXTRA_CHECKS is 0. */
#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
/* Insert a thread into a circular doubly-linked list.
 * NOTE(review): braces, the empty-list test and the conditional that
 * selects head- vs tail-insertion are elided from this excerpt. */
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
    /* Empty list: thread becomes a one-element ring pointing at itself. */
    thread->next = thread;
    thread->prev = thread;

    /* Insert before the head (i.e. at the tail of the ring). */
    thread->next = *list;
    thread->prev = (*list)->prev;
    thread->prev->next = thread;
    (*list)->prev = thread;

    /* Alternate path: insert just after the head. */
    thread->next = (*list)->next;
    thread->prev = *list;
    thread->next->prev = thread;
    (*list)->next = thread;
/* Unlink a thread from its circular doubly-linked list.
 * NOTE(review): braces, the "*list = NULL" single-element path and the
 * head-advance condition are elided from this excerpt. */
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
    /* Single-element ring: thread is its own successor. */
    if (thread == thread->next)
        /* Advance the list head past the removed entry. */
        *list = thread->next;

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
/* Walk the current core's sleeping list and move any thread whose sleep
 * timeout has expired back onto the running list.
 * NOTE(review): the enclosing for(;;) loop, braces and break statements
 * are elided from this excerpt. */
static void check_sleepers(void) __attribute__ ((noinline));
static void check_sleepers(void)
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[core].sleeping;

        /* Save successor first: removal below invalidates current's links. */
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[core].sleeping, current);
            add_to_list(&cores[core].running, current);
            current->statearg = 0;

            /* If there are no more processes in the list, break the loop. */
            if (cores[core].sleeping == NULL)

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[core].sleeping)
/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void) __attribute__ ((noinline));
static void wake_list_awaken(void)
    const unsigned int core = CURRENT_CORE;
    /* Lock out IRQs while splicing lists. */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */
    struct thread_entry *waking = cores[core].waking;
    struct thread_entry *running = cores[core].running;

        /* Place waking threads at the end of the running list.
         * NOTE(review): the lines saving/restoring the old tail through
         * tmp are elided from this excerpt. */
        struct thread_entry *tmp;
        waking->prev->next = running;
        running->prev->next = waking;
        running->prev = waking->prev;

        /* Just transfer the list as-is - just came out of a core
         * sleep (running list was empty). */
        cores[core].running = waking;

    /* Done with waking list */
    cores[core].waking = NULL;
    set_irq_level(oldlevel);
/* Idle loop for a core: wake pending threads, expire sleep timeouts once
 * per tick, and put the core to sleep until a runnable thread exists.
 * NOTE(review): the enclosing for(;;) loop, braces, the calls to
 * wake_list_awaken()/check_sleepers()/core_sleep() and the break are
 * elided from this excerpt. */
static inline void sleep_core(void)
    const unsigned int core = CURRENT_CORE;

        /* We want to do these ASAP as it may change the decision to sleep
           the core or the core has woken because an interrupt occurred
           and posted a message to a queue. */
        if (cores[core].waking != NULL)

        /* Expire sleepers at most once per tick. */
        if (cores[core].last_tick != current_tick)
            if (cores[core].sleeping != NULL)
            cores[core].last_tick = current_tick;

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[core].running != NULL)

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
/* Map a thread_entry pointer back to its index in the threads[] pool.
 * NOTE(review): the declaration of i, return statements and braces are
 * elided from this excerpt. */
static int get_threadnum(struct thread_entry *thread)
    for (i = 0; i < MAXTHREADS; i++)
        if (&threads[i] == thread)

/* Profiling hook: start profiling the currently running thread. */
void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
/* Apply a pending state change for the current thread: move it off the
 * running list onto the supplied blocked list or the core's sleeping list,
 * then advance the running head to the next thread.
 * NOTE(review): the test for a requested state change, else branches,
 * #endif and braces are elided from this excerpt. */
static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline));
static void change_thread_state(struct thread_entry **blocked_list)
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *old;
    unsigned long new_state;

    /* Remove the thread from the list of running threads. */
    old = cores[core].running;
    new_state = GET_STATE(old->statearg);

    /* Check if a thread state change has been requested. */

        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[core].running, old);

        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
            add_to_list(&cores[core].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == cores[core].highest_priority)
            cores[core].highest_priority = 100;

        /* Switch to the next running thread. */
        cores[core].running = old->next;
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
/* NOTE(review): braces, several #ifdef guards (profiling, save_context)
 * and part of the priority-selection condition are elided. */
    const unsigned int core = CURRENT_CORE;
    profile_thread_stopped(get_threadnum(cores[core].running));
    unsigned int *stackptr;

        /* Begin task switching by saving our current context so that we can
         * restore the state of the current thread later to the point prior
         * to this call. */
        store_context(&cores[core].running->context);

        /* Check if the current thread stack is overflown */
        stackptr = cores[core].running->stack;
        if(stackptr[0] != DEADBEEF)
            thread_stkov(cores[core].running);

        /* Rearrange thread lists as needed */
        change_thread_state(blocked_list);

        /* This has to be done after the scheduler is finished with the
           blocked_list pointer so that an IRQ can't kill us by attempting
           a wake but before attempting any core sleep. */
        if (cores[core].switch_to_irq_level != STAY_IRQ_LEVEL)
            int level = cores[core].switch_to_irq_level;
            cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
            set_irq_level(level);

    /* Go through the list of sleeping task to check if we need to wake up
     * any of them due to timeout. Also puts core into sleep state until
     * there is at least one running process again. */

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. NOTE(review): the timeout term of the condition below
     * is elided in this excerpt, leaving it syntactically incomplete. */
        int priority = cores[core].running->priority;

        if (priority < cores[core].highest_priority)
            cores[core].highest_priority = priority;

        if (priority == cores[core].highest_priority ||
            (current_tick - cores[core].running->last_run >
            cores[core].running->priority_x != 0)

            cores[core].running = cores[core].running->next;

    /* Reset the value of thread's last running time to the current time. */
    cores[core].running->last_run = current_tick;

    /* And finally give control to the next thread. */
    load_context(&cores[core].running->context);

    profile_thread_started(get_threadnum(cores[core].running));
/* Put the current thread to sleep for at least "ticks" ticks.
 * NOTE(review): braces and the boost-release statements inside the
 * STATE_IS_BOOSTED branch are elided from this excerpt. */
void sleep_thread(int ticks)
    struct thread_entry *current;

    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Sleeping threads give up their CPU boost. */
    if (STATE_IS_BOOSTED(current->statearg))
        if (!boosted_threads)

    /* Set the thread's new state and timeout and finally force a task switch
     * so that scheduler removes thread from the list of running processes
     * and puts it in list of sleeping tasks. */
    /* +1 guarantees at least "ticks" full ticks elapse before wakeup. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);

    switch_thread(true, NULL);
/* Block the current thread indefinitely on "list" until another thread
 * wakes it with wakeup_thread.
 * NOTE(review): braces and the #else/#endif lines around the statearg
 * reset are elided from this excerpt. */
void block_thread(struct thread_entry **list)
    struct thread_entry *current;

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);

    /* We are not allowed to mix blocking types in one queue. */
    THREAD_ASSERT(*list != NULL && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO,
                  "Blocking violation B->*T", current);

    /* Set the state to blocked and ask the scheduler to switch tasks,
     * this takes us off of the run queue until we are explicitly woken */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);

    switch_thread(true, list);

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
/* Block the current thread on "list" until woken or until "timeout" ticks
 * have elapsed.
 * NOTE(review): braces, the boost-release statements and the *list
 * assignment/clear around the switch are elided from this excerpt. */
void block_thread_w_tmo(struct thread_entry **list, int timeout)
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation, whatever we are waiting
     * for _may or may not_ happen, regardless of boost state, (user input
     * for instance), so this thread no longer needs to boost */
    if (STATE_IS_BOOSTED(current->statearg))
        if (!boosted_threads)

    /* We can store only one thread to the "list" if thread is used
     * in other list (such as core's list for sleeping tasks). */
    THREAD_ASSERT(*list == NULL, "Blocking violation T->*B", current);

    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);

    /* Set the "list" for explicit wakeup */

    /* Now force a task switch and block until we have been woken up
     * by another thread or timeout is reached. */
    switch_thread(true, NULL);

    /* It is now safe for another thread to block on this "list" */
#if !defined(SIMULATOR)
/* Block on "list" and have the scheduler restore the given IRQ level once
 * the blocked_list handoff is complete (see switch_thread).
 * NOTE(review): braces and the block_thread(list) call are elided. */
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
    cores[CURRENT_CORE].switch_to_irq_level = level;

/* Timeout-blocking variant of the above. */
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
/* Wake the thread at the head of "list", if any.
 * NOTE(review): the NULL-list early return, the assignment of "thread",
 * the STATE_BLOCKED case label, break statements, default case and braces
 * are elided from this excerpt. */
void wakeup_thread(struct thread_entry **list)
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */

    /* Wake up the last thread first. */

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
        /* Remove thread from the list of blocked threads and add it
         * to the scheduler's list of running processes. List removal
         * is safe since each object maintains it's own list of
         * sleepers and queues protect against reentrancy. */
        remove_from_list(list, thread);
        add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);

    case STATE_BLOCKED_W_TMO:
        /* Just remove the timeout to cause scheduler to immediately
         * wake up the thread. */
        thread->statearg = 0;

        /* Nothing to do. Thread has already been woken up
         * or it's state is not blocked or blocked with timeout. */
/* Return the index of the first free slot in threads[] (name == NULL
 * marks a free slot). NOTE(review): the declaration of n, return
 * statements and braces are elided from this excerpt. */
inline static int find_empty_thread_slot(void)
    for (n = 0; n < MAXTHREADS; n++)
        if (threads[n].name == NULL)
/* Like wakeup_thread but safe against IRQ corruption when IRQs are
 * disabled. NOTE(review): the wakeup_thread(list) call between the two
 * wakeup_list assignments and the braces are elided from this excerpt. */
void wakeup_thread_irq_safe(struct thread_entry **list)
    struct core_entry *core = &cores[CURRENT_CORE];
    /* Switch wakeup lists and call wakeup_thread */
    core->wakeup_list = &core->waking;
    /* Switch back to normal running list */
    core->wakeup_list = &core->running;
/*---------------------------------------------------------------------------
 * Create a thread.
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
/* NOTE(review): the return-type line, slot/i declarations, the fallback
 * test, the flush/wake code for the other core, the final return and many
 * braces are elided from this excerpt. */
create_thread(void (*function)(void), void* stack, int stack_size,
              const char *name IF_PRIO(, int priority)
              IF_COP(, unsigned int core, bool fallback))
    unsigned int stacklen;
    unsigned int *stackptr;
    struct thread_entry *thread;

    /*
     * To prevent ifdef hell while keeping the binary size down, we define
     * core here if it hasn't been passed as a parameter
     */

    /* If the kernel hasn't initialised on the COP (most likely due to an old
     * bootloader) then refuse to start threads on the COP
     */
    if ((core == COP) && !cores[core].kernel_running)
            /* Retry on the CPU instead (fallback requested). */
            return create_thread(function, stack, stack_size, name
                                 IF_PRIO(, priority) IF_COP(, CPU, false));

    slot = find_empty_thread_slot();

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    for(i = 0;i < stacklen;i++)
        stackptr[i] = DEADBEEF;

    /* Store interesting information */
    thread = &threads[slot];
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = 0;
    thread->priority = priority;
    /* Force a rescan of the priority levels. */
    cores[core].highest_priority = 100;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)

    /* Align stack to an even 32 bit boundary */
    thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    add_to_list(&cores[core].running, thread);
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/* Mark the current thread as boosted; the first booster triggers the
 * actual CPU frequency boost. NOTE(review): braces, the boosted_threads
 * increment and the cpu_boost call are elided from this excerpt. */
void trigger_cpu_boost(void)
    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
/* NOTE(review): the NULL-parameter check preceding the self-removal
 * assignment, the name-clearing statement, else branches and braces are
 * elided from this excerpt. */
void remove_thread(struct thread_entry *thread)
    const unsigned int core = CURRENT_CORE;

        /* NULL means "remove the calling thread". */
        thread = cores[core].running;

    /* Free the entry by removing thread name. */

#ifdef HAVE_PRIORITY_SCHEDULING
    cores[IF_COP2(thread->core)].highest_priority = 100;

    if (thread == cores[IF_COP2(thread->core)].running)
        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);

        /* Switch to the idle stack if not on the main core (where "main"
         * runs) - the dying thread's stack may belong to an unloaded
         * module. */
        switch_to_idle_stack(core);

        switch_thread(false, NULL);
        /* This should never and must never be reached - if it is, the
         * state is corrupted */
        THREAD_PANICF("remove_thread->K:*R", thread);

    if (thread == cores[IF_COP2(thread->core)].sleeping)
        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
        remove_from_list(NULL, thread);
#ifdef HAVE_PRIORITY_SCHEDULING
/* Set a thread's priority, returning the previous value. A NULL thread
 * means the current thread. NOTE(review): the old_priority declaration,
 * NULL-checks and braces are elided from this excerpt. */
int thread_set_priority(struct thread_entry *thread, int priority)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    /* Force the scheduler to rescan priorities. */
    cores[IF_COP2(thread->core)].highest_priority = 100;

    return old_priority;

/* Get a thread's priority; NULL means the current thread. */
int thread_get_priority(struct thread_entry *thread)
        thread = cores[CURRENT_CORE].running;

    return thread->priority;

/* Yield once even to lower-priority threads by temporarily setting the
 * priority_x override around a forced switch. */
void priority_yield(void)
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;
    switch_thread(true, NULL);
    thread->priority_x = 0;
#endif /* HAVE_PRIORITY_SCHEDULING */
/* Return the thread currently running on this core. */
struct thread_entry * thread_get_current(void)
    return cores[CURRENT_CORE].running;
/* Initialize the scheduler for the calling core and register the already
 * running "main" context as its first thread. On multi-core targets the
 * CPU then wakes the COP and sleeps until the COP has initialized.
 * NOTE(review): the slot declaration, the threads[slot].name NULL-check
 * body, the CPU/COP #if-#else structure and braces are elided from this
 * excerpt. */
void init_threads(void)
    const unsigned int core = CURRENT_CORE;

    /* CPU will initialize first and then sleep */
    slot = find_empty_thread_slot();
#if THREAD_EXTRA_CHECKS
    /* This can fail if, for example, .bss isn't zero'ed out by the loader
       or threads is in the wrong section. */
        panicf("uninitialized threads[]");

    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].waking = NULL;
    cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
    threads[slot].name = main_thread_name;
    threads[slot].statearg = 0;
    threads[slot].context.start = 0; /* core's main thread already running */
    threads[slot].core = core;
#ifdef HAVE_PRIORITY_SCHEDULING
    threads[slot].priority = PRIORITY_USER_INTERFACE;
    threads[slot].priority_x = 0;
    cores[core].highest_priority = 100;

    add_to_list(&cores[core].running, &threads[slot]);

    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
        boosted_threads = 0;
        threads[slot].stack = stackbegin;
        threads[slot].stack_size = (int)stackend - (int)stackbegin;
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        /* Mark CPU initialized */
        cores[CPU].kernel_running = true;
        /* Do _not_ wait for the COP to init in the bootloader because it doesn't */
        /* TODO: HAL interface for this */
        /* Wake up coprocessor and let it initialize kernel and threads */
        COP_CTL = PROC_WAKE;
        /* Sleep until finished */
        CPU_CTL = PROC_SLEEP;

        /* Initial stack is the COP idle stack */
        threads[slot].stack = cop_idlestackbegin;
        threads[slot].stack_size = IDLE_STACK_SIZE;
        /* Mark COP initialized */
        cores[COP].kernel_running = true;
        /* Get COP safely primed inside switch_thread where it will remain
         * until a thread actually exists on it */
        CPU_CTL = PROC_WAKE;
        remove_thread(NULL);
#endif /* NUM_CORES */
/* Return the percentage of a thread's stack ever used, found by scanning
 * for the first overwritten DEADBEEF word from the bottom.
 * NOTE(review): the declaration of i, the loop break, the divisor of the
 * final expression and braces are elided from this excerpt. */
int thread_stack_usage(const struct thread_entry *thread)
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
        if (stackptr[i] != DEADBEEF)

    return ((thread->stack_size - i * sizeof(int)) * 100) /
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
/* NOTE(review): the declarations of i/usage, the loop break, the return
 * and braces are elided from this excerpt. */
int idle_stack_usage(unsigned int core)
    unsigned int *stackptr = idle_stacks[core];

    for (i = 0; i < IDLE_STACK_WORDS; i++)
        if (stackptr[i] != DEADBEEF)
            usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
/* Return the thread's scheduler state (running/sleeping/blocked...)
 * extracted from its statearg word. */
int thread_get_status(const struct thread_entry *thread)
    return GET_STATE(thread->statearg);
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
/* NOTE(review): the buffer/size guard, the alternate fmt assignment for
 * the ID case and braces are elided from this excerpt. */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";
        if (name == NULL || *name == '\0')
            /* Fall back to printing the thread pointer as an ID. */
            name = (const char *)thread;

        snprintf(buffer, size, fmt, name);