/* firmware/thread.c */
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Ulf Ralberg
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
19 #include "config.h"
20 #include <stdbool.h>
21 #include "thread.h"
22 #include "panic.h"
23 #include "sprintf.h"
24 #include "system.h"
25 #include "kernel.h"
26 #include "cpu.h"
27 #include "string.h"
28 #ifdef RB_PROFILE
29 #include <profile.h>
30 #endif
32 /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
33 #ifdef DEBUG
34 #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
35 #else
36 #define THREAD_EXTRA_CHECKS 0
37 #endif
39 /**
40 * General locking order to guarantee progress. Order must be observed but
41 * not all stages are necessarily obligatory. Going from 1) to 3) is
42 * perfectly legal.
44 * 1) IRQ
45 * This is first because of the likelihood of having an interrupt occur that
46 * also accesses one of the objects farther down the list. Any non-blocking
47 * synchronization done may already have a lock on something during normal
48 * execution and if an interrupt handler running on the same processor as
49 * the one that has the resource locked were to attempt to access the
50 * resource, the interrupt handler would spin forever waiting for an unlock
51 * that will never happen. There is no danger if the interrupt occurs on
52 * a different processor because the one that has the lock will eventually
53 * unlock and the other processor's handler may proceed at that time. Not
54 * necessary when the resource in question is definitely not available to
55 * interrupt handlers.
57 * 2) Kernel Object
58 * 1) May be needed beforehand if the kernel object allows dual-use such as
59 * event queues. The kernel object must have a scheme to protect itself from
60 * access by another processor and is responsible for serializing the calls
61 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
62 * other. If a thread blocks on an object it must fill-in the blk_ops members
63 * for its core to unlock _after_ the thread's context has been saved and the
64 * unlocking will be done in reverse from this hierarchy.
66 * 3) Thread Slot
67 * This locks access to the thread's slot such that its state cannot be
68 * altered by another processor when a state change is in progress such as
69 * when it is in the process of going on a blocked list. An attempt to wake
70 * a thread while it is still blocking will likely desync its state with
71 * the other resources used for that state.
73 * 4) Lists
74 * Usually referring to a list (aka. queue) that a thread will be blocking
75 * on that belongs to some object and is shareable amongst multiple
76 * processors. Parts of the scheduler may have access to them without actually
77 * locking the kernel object such as when a thread is blocked with a timeout
78 * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
79 * its lists locked when the thread blocks so that all object list access is
80 * synchronized. Failure to do so would corrupt the list links.
82 * 5) Core Lists
83 * These lists are specific to a particular processor core and are accessible
84 * by all processor cores and interrupt handlers. They are used when an
85 * operation may only be performed by the thread's own core in a normal
86 * execution context. The wakeup list is the prime example where a thread
87 * may be added by any means and the thread's own core will remove it from
88 * the wakeup list and put it on the running list (which is only ever
89 * accessible by its own processor).
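 *
 * A minimal usage sketch of this order (illustrative only, not part of the
 * scheduler): waking a thread that is blocked on some kernel object's queue.
 * "obj", its "cl" corelock and its "queue" are hypothetical names standing in
 * for whatever kernel object is involved; the real code paths are the
 * wakeup/block routines further down in this file.
 *
 *   int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);  // 1) IRQ
 *   corelock_lock(&obj->cl);                          // 2) kernel object
 *   unsigned state = GET_THREAD_STATE(thread);        // 3) thread slot
 *   remove_from_list_l(&obj->queue, thread);          // 4) object's list
 *   core_schedule_wakeup(thread);                     // 5) core's waking list
 *   UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);   //    unlock in reverse
 *   corelock_unlock(&obj->cl);
 *   set_irq_level(oldlevel);
 */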
91 #define DEADBEEF ((unsigned int)0xdeadbeef)
92 /* Cast to the machine int type, whose size could be < 4. */
93 struct core_entry cores[NUM_CORES] IBSS_ATTR;
94 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
95 #ifdef HAVE_SCHEDULER_BOOSTCTRL
96 static int boosted_threads IBSS_ATTR;
97 #endif
99 static const char main_thread_name[] = "main";
100 extern int stackbegin[];
101 extern int stackend[];
103 /* core_sleep procedure to implement for any CPU to ensure an asynchronous wakeup
104 * never results in requiring a wait until the next tick (up to 10000uS!). Likely
105 * requires assembly and careful instruction ordering. Multicore requires
106 * carefully timed sections in order to have synchronization without locking of
107 * any sort.
109 * 1) Disable all interrupts (FIQ and IRQ for ARM for instance)
110 * 2) Check *waking == NULL.
111 * 3) *waking not NULL? Goto step 7.
112 * 4) On multicore, stay awake if directed to do so by another core. If so, goto step 7.
113 * 5) If processor requires, atomically reenable interrupts and perform step 6.
114 * 6) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
115 * goto step 8.
116 * 7) Reenable interrupts.
117 * 8) Exit procedure.
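 *
 * As a rough single-core illustration of these steps (assumptions only, not
 * compiled; the real, carefully timed per-CPU versions follow in the
 * processor-specific section below):
 */
#if 0
static inline void core_sleep(struct thread_entry **waking)
{
    /* 1) Disable interrupts */
    set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);

    /* 2)/3) Only sleep if nothing was placed on the waking list meanwhile */
    if (*waking == NULL)
    {
        /* 5)/6) Sleep the core - platform-specific, e.g. PROC_CTL(core) =
         * PROC_SLEEP on PortalPlayer or "stop #0x2000" on Coldfire */
    }

    /* 7) Reenable interrupts and 8) exit */
    set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
}
#endif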
119 static inline void core_sleep(
120 IF_COP(unsigned int core,) struct thread_entry **waking)
121 __attribute__((always_inline));
123 static void check_tmo_threads(void)
124 __attribute__((noinline));
126 static inline void block_thread_on_l(
127 struct thread_queue *list, struct thread_entry *thread, unsigned state)
128 __attribute__((always_inline));
130 static inline void block_thread_on_l_no_listlock(
131 struct thread_entry **list, struct thread_entry *thread, unsigned state)
132 __attribute__((always_inline));
134 static inline void _block_thread_on_l(
135 struct thread_queue *list, struct thread_entry *thread,
136 unsigned state IF_SWCL(, const bool single))
137 __attribute__((always_inline));
139 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
140 struct thread_queue *list IF_SWCL(, const bool nolock))
141 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
143 IF_SWCL(static inline) void _block_thread(
144 struct thread_queue *list IF_SWCL(, const bool nolock))
145 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
147 static void add_to_list_tmo(struct thread_entry *thread)
148 __attribute__((noinline));
150 static void core_schedule_wakeup(struct thread_entry *thread)
151 __attribute__((noinline));
153 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
154 __attribute__((always_inline));
156 static inline void run_blocking_ops(
157 IF_COP_VOID(unsigned int core, struct thread_entry *thread))
158 __attribute__((always_inline));
160 static void thread_stkov(struct thread_entry *thread)
161 __attribute__((noinline));
163 static inline void store_context(void* addr)
164 __attribute__((always_inline));
166 static inline void load_context(const void* addr)
167 __attribute__((always_inline));
169 void switch_thread(struct thread_entry *old)
170 __attribute__((noinline));
173 /****************************************************************************
174 * Processor-specific section
177 #if defined(CPU_ARM)
178 /*---------------------------------------------------------------------------
179 * Start the thread running and terminate it if it returns
180 *---------------------------------------------------------------------------
182 static void start_thread(void) __attribute__((naked,used));
183 static void start_thread(void)
185 /* r0 = context */
186 asm volatile (
187 "ldr sp, [r0, #32] \n" /* Load initial sp */
188 "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
189 "mov r1, #0 \n" /* Mark thread as running */
190 "str r1, [r0, #40] \n"
191 #if NUM_CORES > 1
192 "ldr r0, =invalidate_icache \n" /* Invalidate this core's cache. */
193 "mov lr, pc \n" /* This could be the first entry into */
194 "bx r0 \n" /* plugin or codec code for this core. */
195 #endif
196 "mov lr, pc \n" /* Call thread function */
197 "bx r4 \n"
198 "mov r0, #0 \n" /* remove_thread(NULL) */
199 "ldr pc, =remove_thread \n"
200 ".ltorg \n" /* Dump constant pool */
201 ); /* No clobber list - new thread doesn't care */
204 /* For startup, place context pointer in r4 slot, start_thread pointer in r5
205 * slot, and thread function pointer in context.start. See load_context for
206 * what happens when thread is initially going to run. */
207 #define THREAD_STARTUP_INIT(core, thread, function) \
208 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
209 (thread)->context.r[1] = (unsigned int)start_thread, \
210 (thread)->context.start = (void *)function; })
212 /*---------------------------------------------------------------------------
213 * Store non-volatile context.
214 *---------------------------------------------------------------------------
216 static inline void store_context(void* addr)
218 asm volatile(
219 "stmia %0, { r4-r11, sp, lr } \n"
220 : : "r" (addr)
224 /*---------------------------------------------------------------------------
225 * Load non-volatile context.
226 *---------------------------------------------------------------------------
228 static inline void load_context(const void* addr)
230 asm volatile(
231 "ldr r0, [%0, #40] \n" /* Load start pointer */
232 "cmp r0, #0 \n" /* Check for NULL */
233 "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
234 "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
235 : : "r" (addr) : "r0" /* only! */
239 #if defined (CPU_PP)
241 #if NUM_CORES > 1
242 extern int cpu_idlestackbegin[];
243 extern int cpu_idlestackend[];
244 extern int cop_idlestackbegin[];
245 extern int cop_idlestackend[];
246 static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
248 [CPU] = cpu_idlestackbegin,
249 [COP] = cop_idlestackbegin
251 #endif /* NUM_CORES */
253 #if CONFIG_CORELOCK == SW_CORELOCK
254 /* Software core locks using Peterson's mutual exclusion algorithm */
256 /*---------------------------------------------------------------------------
257 * Initialize the corelock structure.
258 *---------------------------------------------------------------------------
260 void corelock_init(struct corelock *cl)
262 memset(cl, 0, sizeof (*cl));
265 #if 1 /* Assembly locks to minimize overhead */
266 /*---------------------------------------------------------------------------
267 * Wait for the corelock to become free and acquire it when it does.
268 *---------------------------------------------------------------------------
270 void corelock_lock(struct corelock *cl) __attribute__((naked));
271 void corelock_lock(struct corelock *cl)
273 asm volatile (
274 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
275 "ldrb r1, [r1] \n"
276 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
277 "and r2, r1, #1 \n" /* r2 = othercore */
278 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
279 "1: \n"
280 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 0 ? */
281 "cmp r3, #0 \n"
282 "ldrneb r3, [r0, #2] \n" /* || cl->turn == core ? */
283 "cmpne r3, r1, lsr #7 \n"
284 "bxeq lr \n" /* yes? lock acquired */
285 "b 1b \n" /* keep trying */
286 : : "i"(&PROCESSOR_ID)
288 (void)cl;
291 /*---------------------------------------------------------------------------
292 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
293 *---------------------------------------------------------------------------
295 int corelock_try_lock(struct corelock *cl) __attribute__((naked));
296 int corelock_try_lock(struct corelock *cl)
298 asm volatile (
299 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
300 "ldrb r1, [r1] \n"
301 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
302 "and r2, r1, #1 \n" /* r2 = othercore */
303 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
304 "1: \n"
305 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 0 ? */
306 "cmp r3, #0 \n"
307 "ldrneb r3, [r0, #2] \n" /* || cl->turn == core? */
308 "cmpne r3, r1, lsr #7 \n"
309 "moveq r0, #1 \n" /* yes? lock acquired */
310 "bxeq lr \n"
311 "mov r2, #0 \n" /* cl->myl[core] = 0 */
312 "strb r2, [r0, r1, lsr #7] \n"
313 "mov r0, r2 \n"
314 "bx lr \n" /* acquisition failed */
315 : : "i"(&PROCESSOR_ID)
318 return 0;
319 (void)cl;
322 /*---------------------------------------------------------------------------
323 * Release ownership of the corelock
324 *---------------------------------------------------------------------------
326 void corelock_unlock(struct corelock *cl) __attribute__((naked));
327 void corelock_unlock(struct corelock *cl)
329 asm volatile (
330 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
331 "ldrb r1, [r1] \n"
332 "mov r2, #0 \n" /* cl->myl[core] = 0 */
333 "strb r2, [r0, r1, lsr #7] \n"
334 "bx lr \n"
335 : : "i"(&PROCESSOR_ID)
337 (void)cl;
339 #else /* C versions for reference */
340 /*---------------------------------------------------------------------------
341 * Wait for the corelock to become free and acquire it when it does.
342 *---------------------------------------------------------------------------
344 void corelock_lock(struct corelock *cl)
346 const unsigned int core = CURRENT_CORE;
347 const unsigned int othercore = 1 - core;
349 cl->myl[core] = core;
350 cl->turn = othercore;
352 for (;;)
354 if (cl->myl[othercore] == 0 || cl->turn == core)
355 break;
359 /*---------------------------------------------------------------------------
360 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
361 *---------------------------------------------------------------------------
363 int corelock_try_lock(struct corelock *cl)
365 const unsigned int core = CURRENT_CORE;
366 const unsigned int othercore = 1 - core;
368 cl->myl[core] = core;
369 cl->turn = othercore;
371 if (cl->myl[othercore] == 0 || cl->turn == core)
373 return 1;
376 cl->myl[core] = 0;
377 return 0;
380 /*---------------------------------------------------------------------------
381 * Release ownership of the corelock
382 *---------------------------------------------------------------------------
384 void corelock_unlock(struct corelock *cl)
386 cl->myl[CURRENT_CORE] = 0;
388 #endif /* ASM / C selection */
390 #endif /* CONFIG_CORELOCK == SW_CORELOCK */
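/* A minimal usage sketch of the corelock API above (illustrative only, not
 * compiled): a corelock is embedded in a shared object, initialized once, and
 * then taken around any cross-core access to that object. "shared_cl",
 * "example_init" and "example_access" are hypothetical names. */
#if 0
static struct corelock shared_cl;

void example_init(void)
{
    corelock_init(&shared_cl);    /* zero myl[] and turn                */
}

void example_access(void)
{
    corelock_lock(&shared_cl);    /* spin until this core owns the lock */
    /* ... touch the shared state ... */
    corelock_unlock(&shared_cl);  /* release it for the other core      */
}
#endif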
392 /*---------------------------------------------------------------------------
393 * Put core in a power-saving state if waking list wasn't repopulated and if
394 * no other core requested a wakeup for it to perform a task.
395 *---------------------------------------------------------------------------
397 static inline void core_sleep(IF_COP(unsigned int core,) struct thread_entry **waking)
399 #if NUM_CORES > 1
400 #ifdef CPU_PP502x
401 #if 1
402 /* Disabling IRQ and FIQ is important to making the fixed-time sequence
403 * non-interruptable */
404 asm volatile (
405 "mrs r2, cpsr \n" /* Disable IRQ, FIQ */
406 "orr r2, r2, #0xc0 \n"
407 "msr cpsr_c, r2 \n"
408 "mov r0, #4 \n" /* r0 = 0x4 << core */
409 "mov r0, r0, lsl %[c] \n"
410 "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
411 "ldr r1, [%[waking]] \n" /* *waking == NULL ? */
412 "cmp r1, #0 \n"
413 "ldreq r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
414 "tsteq r1, r0, lsl #2 \n"
415 "moveq r1, #0x80000000 \n" /* Then sleep */
416 "streq r1, [%[ctl], %[c], lsl #2] \n"
417 "moveq r1, #0 \n" /* Clear control reg */
418 "streq r1, [%[ctl], %[c], lsl #2] \n"
419 "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
420 "str r1, [%[mbx], #8] \n"
421 "1: \n" /* Wait for wake procedure to finish */
422 "ldr r1, [%[mbx], #0] \n"
423 "tst r1, r0, lsr #2 \n"
424 "bne 1b \n"
425 "bic r2, r2, #0xc0 \n" /* Enable interrupts */
426 "msr cpsr_c, r2 \n"
428 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
429 [waking]"r"(waking), [c]"r"(core)
430 : "r0", "r1", "r2");
431 #else /* C version for reference */
432 /* Disable IRQ, FIQ */
433 set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
435 /* Signal intent to sleep */
436 MBX_MSG_SET = 0x4 << core;
438 /* Something waking or other processor intends to wake us? */
439 if (*waking == NULL && (MBX_MSG_STAT & (0x10 << core)) == 0)
441 PROC_CTL(core) = PROC_SLEEP; nop; /* Snooze */
442 PROC_CTL(core) = 0; /* Clear control reg */
445 /* Signal wake - clear wake flag */
446 MBX_MSG_CLR = 0x14 << core;
448 /* Wait for other processor to finish wake procedure */
449 while (MBX_MSG_STAT & (0x1 << core));
451 /* Enable IRQ, FIQ */
452 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
453 #endif /* ASM/C selection */
454 #else
455 /* TODO: PP5002 */
456 #endif /* CONFIG_CPU == */
457 #else
458 set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
459 if (*waking == NULL)
461 PROC_CTL(IF_COP_CORE(core)) = PROC_SLEEP;
463 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
464 #endif /* NUM_CORES */
467 /*---------------------------------------------------------------------------
468 * Wake another processor core that is sleeping or prevent it from doing so
469 * if it was already destined. FIQ, IRQ should be disabled before calling.
470 *---------------------------------------------------------------------------
472 void core_wake(IF_COP_VOID(unsigned int othercore))
474 #if NUM_CORES == 1
475 /* No wakey - core already wakey */
476 #elif defined (CPU_PP502x)
477 #if 1
478 /* avoid r0 since that contains othercore */
479 asm volatile (
480 "mrs r3, cpsr \n" /* Disable IRQ */
481 "orr r1, r3, #0x80 \n"
482 "msr cpsr_c, r1 \n"
483 "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
484 "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
485 "str r2, [%[mbx], #4] \n"
486 "1: \n" /* If it intends to sleep, let it first */
487 "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
488 "eor r1, r1, #0xc \n"
489 "tst r1, r2, lsr #2 \n"
490 "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
491 "tsteq r1, #0x80000000 \n"
492 "beq 1b \n" /* Wait for sleep or wake */
493 "tst r1, #0x80000000 \n" /* If sleeping, wake it */
494 "movne r1, #0x0 \n"
495 "strne r1, [%[ctl], %[oc], lsl #2] \n"
496 "mov r1, r2, lsr #4 \n"
497 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
498 "msr cpsr_c, r3 \n" /* Restore int status */
500 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [oc]"r" (othercore)
501 : "r1", "r2", "r3");
502 #else /* C version for reference */
503 /* Disable interrupts - avoid reentrancy from the tick */
504 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
506 /* Signal intent to wake other processor - set stay awake */
507 MBX_MSG_SET = 0x11 << othercore;
509 /* If it intends to sleep, wait until it does or aborts */
510 while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
511 (PROC_CTL(othercore) & PROC_SLEEP) == 0);
513 /* If sleeping, wake it up */
514 if (PROC_CTL(othercore) & PROC_SLEEP)
516 PROC_CTL(othercore) = 0;
519 /* Done with wake procedure */
520 MBX_MSG_CLR = 0x1 << othercore;
521 set_irq_level(oldlevel);
522 #endif /* ASM/C selection */
523 #else
524 PROC_CTL(othercore) = PROC_WAKE;
525 #endif
528 #if NUM_CORES > 1
529 /*---------------------------------------------------------------------------
530 * Switches to a stack that always resides in the Rockbox core.
532 * Needed when a thread suicides on a core other than the main CPU since the
533 * stack used when idling is the stack of the last thread to run. This stack
534 * may not reside in the core in which case the core will continue to use a
535 * stack from an unloaded module until another thread runs on it.
536 *---------------------------------------------------------------------------
538 static inline void switch_to_idle_stack(const unsigned int core)
540 asm volatile (
541 "str sp, [%0] \n" /* save original stack pointer on idle stack */
542 "mov sp, %0 \n" /* switch stacks */
543 : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
544 (void)core;
547 /*---------------------------------------------------------------------------
548 * Perform core switch steps that need to take place inside switch_thread.
550 * These steps must take place before changing the processor and after
551 * having entered switch_thread since switch_thread may not do a normal return
552 * because the stack being used for anything the compiler saved will not belong
553 * to the thread's destination core and it may have been recycled for other
554 * purposes by the time a normal context load has taken place. switch_thread
555 * will also clobber anything stashed in the thread's context or stored in the
556 * nonvolatile registers if it is saved there before the call since the
557 * compiler's order of operations cannot be known for certain.
559 static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
561 /* Flush our data to ram */
562 flush_icache();
563 /* Stash thread in r4 slot */
564 thread->context.r[0] = (unsigned int)thread;
565 /* Stash restart address in r5 slot */
566 thread->context.r[1] = (unsigned int)thread->context.start;
567 /* Save sp in context.sp while still running on old core */
568 thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
571 /*---------------------------------------------------------------------------
572 * Machine-specific helper function for switching the processor a thread is
573 * running on. Basically, the thread suicides on the departing core and is
574 * reborn on the destination. Were it not for gcc's ill-behavior regarding
575 * naked functions written in C where it actually clobbers non-volatile
576 * registers before the intended prologue code, this would all be much
577 * simpler. Generic setup is done in switch_core itself.
580 /*---------------------------------------------------------------------------
581 * This actually performs the core switch.
583 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
584 __attribute__((naked));
585 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
587 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
588 * Stack access also isn't permitted until restoring the original stack and
589 * context. */
590 asm volatile (
591 "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */
592 "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
593 "ldr r2, [r2, r0, lsl #2] \n"
594 "add r2, r2, %0*4 \n"
595 "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
596 "mov sp, r2 \n" /* switch stacks */
597 "adr r2, 1f \n" /* r2 = new core restart address */
598 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
599 "mov r0, r1 \n" /* switch_thread(thread) */
600 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
601 "1: \n"
602 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
603 "mov r1, #0 \n" /* Clear start address */
604 "str r1, [r0, #40] \n"
605 "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */
606 "mov lr, pc \n"
607 "bx r0 \n"
608 "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */
609 ".ltorg \n" /* Dump constant pool */
610 : : "i"(IDLE_STACK_WORDS)
612 (void)core; (void)thread;
614 #endif /* NUM_CORES */
616 #elif CONFIG_CPU == S3C2440
618 /*---------------------------------------------------------------------------
619 * Put core in a power-saving state if waking list wasn't repopulated.
620 *---------------------------------------------------------------------------
622 static inline void core_sleep(struct thread_entry **waking)
624 /* FIQ also changes the CLKCON register so FIQ must be disabled
625 when changing it here */
626 asm volatile (
627 "mrs r0, cpsr \n" /* Disable IRQ, FIQ */
628 "orr r0, r0, #0xc0 \n"
629 "msr cpsr_c, r0 \n"
630 "ldr r1, [%0] \n" /* Check *waking */
631 "cmp r1, #0 \n"
632 "bne 2f \n" /* != NULL -> exit */
633 "bic r0, r0, #0xc0 \n" /* Prepare IRQ, FIQ enable */
634 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
635 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
636 "orr r2, r2, #4 \n"
637 "str r2, [r1, #0xc] \n"
638 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
639 "mov r3, #0 \n" /* wait for IDLE */
640 "1: \n"
641 "add r3, r3, #1 \n"
642 "cmp r3, #10 \n"
643 "bne 1b \n"
644 "orr r0, r0, #0xc0 \n" /* Disable IRQ, FIQ */
645 "msr cpsr_c, r0 \n"
646 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
647 "bic r2, r2, #4 \n"
648 "str r2, [r1, #0xc] \n"
649 "2: \n"
650 "bic r0, r0, #0xc0 \n" /* Enable IRQ, FIQ */
651 "msr cpsr_c, r0 \n"
652 : : "r"(waking) : "r0", "r1", "r2", "r3");
654 #elif defined(CPU_TCC77X)
655 static inline void core_sleep(struct thread_entry **waking)
657 #warning TODO: Implement core_sleep
659 #else
660 static inline void core_sleep(struct thread_entry **waking)
662 (void) waking;
663 #warning core_sleep not implemented, battery life will be decreased
665 #endif /* CONFIG_CPU == */
667 #elif defined(CPU_COLDFIRE)
668 /*---------------------------------------------------------------------------
669 * Start the thread running and terminate it if it returns
670 *---------------------------------------------------------------------------
672 void start_thread(void); /* Provide C access to ASM label */
673 static void __start_thread(void) __attribute__((used));
674 static void __start_thread(void)
676 /* a0=macsr, a1=context */
677 asm volatile (
678 "start_thread: \n" /* Start here - no naked attribute */
679 "move.l %a0, %macsr \n" /* Set initial mac status reg */
680 "lea.l 48(%a1), %a1 \n"
681 "move.l (%a1)+, %sp \n" /* Set initial stack */
682 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
683 "clr.l (%a1) \n" /* Mark thread running */
684 "jsr (%a2) \n" /* Call thread function */
685 "clr.l -(%sp) \n" /* remove_thread(NULL) */
686 "jsr remove_thread \n"
690 /* Set EMAC unit to fractional mode with saturation for each new thread,
691 * since that's what'll be the most useful for most things which the dsp
692 * will do. Codecs should still initialize their preferred modes
693 * explicitly. Context pointer is placed in d2 slot and start_thread
694 * pointer in d3 slot. thread function pointer is placed in context.start.
695 * See load_context for what happens when thread is initially going to
696 * run.
698 #define THREAD_STARTUP_INIT(core, thread, function) \
699 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
700 (thread)->context.d[0] = (unsigned int)&(thread)->context, \
701 (thread)->context.d[1] = (unsigned int)start_thread, \
702 (thread)->context.start = (void *)(function); })
704 /*---------------------------------------------------------------------------
705 * Store non-volatile context.
706 *---------------------------------------------------------------------------
708 static inline void store_context(void* addr)
710 asm volatile (
711 "move.l %%macsr,%%d0 \n"
712 "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
713 : : "a" (addr) : "d0" /* only! */
717 /*---------------------------------------------------------------------------
718 * Load non-volatile context.
719 *---------------------------------------------------------------------------
721 static inline void load_context(const void* addr)
723 asm volatile (
724 "move.l 52(%0), %%d0 \n" /* Get start address */
725 "beq.b 1f \n" /* NULL -> already running */
726 "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
727 "jmp (%%a2) \n" /* Start the thread */
728 "1: \n"
729 "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
730 "move.l %%d0, %%macsr \n"
731 : : "a" (addr) : "d0" /* only! */
735 /*---------------------------------------------------------------------------
736 * Put core in a power-saving state if waking list wasn't repopulated.
737 *---------------------------------------------------------------------------
739 static inline void core_sleep(struct thread_entry **waking)
741 asm volatile (
742 "moveq.l %1, %%d0 \n" /* Disable interrupts (not audio DMA) */
743 "lsl.l #8, %%d0 \n"
744 "move.w %%d0, %%sr \n"
745 "tst.l (%0) \n" /* Check *waking */
746 "beq.b 1f \n" /* != NULL -> exit */
747 "moveq.l #0x20, %%d0 \n" /* Enable interrupts */
748 "lsl.l #8, %%d0 \n"
749 "move.w %%d0, %%sr \n"
750 ".word 0x51fb \n" /* tpf.l - eat stop instruction */
751 "1: \n"
752 "stop #0x2000 \n" /* Supervisor mode, interrupts enabled
753 upon wakeup */
754 : : "a"(waking), "i"((0x2000 | HIGHEST_IRQ_LEVEL) >> 8) : "d0"
758 #elif CONFIG_CPU == SH7034
759 /*---------------------------------------------------------------------------
760 * Start the thread running and terminate it if it returns
761 *---------------------------------------------------------------------------
763 void start_thread(void); /* Provide C access to ASM label */
764 static void __start_thread(void) __attribute__((used));
765 static void __start_thread(void)
767 /* r8 = context */
768 asm volatile (
769 "_start_thread: \n" /* Start here - no naked attribute */
770 "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
771 "mov.l @(28, r8), r15 \n" /* Set initial sp */
772 "mov #0, r1 \n" /* Start the thread */
773 "jsr @r0 \n"
774 "mov.l r1, @(36, r8) \n" /* Clear start address */
775 "mov.l 1f, r0 \n" /* remove_thread(NULL) */
776 "jmp @r0 \n"
777 "mov #0, r4 \n"
778 "1: \n"
779 ".long _remove_thread \n"
783 /* Place context pointer in r8 slot, function pointer in r9 slot, and
784 * start_thread pointer in context.start */
785 #define THREAD_STARTUP_INIT(core, thread, function) \
786 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
787 (thread)->context.r[1] = (unsigned int)(function), \
788 (thread)->context.start = (void*)start_thread; })
790 /*---------------------------------------------------------------------------
791 * Store non-volatile context.
792 *---------------------------------------------------------------------------
794 static inline void store_context(void* addr)
796 asm volatile (
797 "add #36, %0 \n" /* Start at last reg. By the time routine */
798 "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
799 "mov.l r15,@-%0 \n"
800 "mov.l r14,@-%0 \n"
801 "mov.l r13,@-%0 \n"
802 "mov.l r12,@-%0 \n"
803 "mov.l r11,@-%0 \n"
804 "mov.l r10,@-%0 \n"
805 "mov.l r9, @-%0 \n"
806 "mov.l r8, @-%0 \n"
807 : : "r" (addr)
811 /*---------------------------------------------------------------------------
812 * Load non-volatile context.
813 *---------------------------------------------------------------------------
815 static inline void load_context(const void* addr)
817 asm volatile (
818 "mov.l @(36, %0), r0 \n" /* Get start address */
819 "tst r0, r0 \n"
820 "bt .running \n" /* NULL -> already running */
821 "jmp @r0 \n" /* r8 = context */
822 ".running: \n"
823 "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
824 "mov.l @%0+, r9 \n"
825 "mov.l @%0+, r10 \n"
826 "mov.l @%0+, r11 \n"
827 "mov.l @%0+, r12 \n"
828 "mov.l @%0+, r13 \n"
829 "mov.l @%0+, r14 \n"
830 "mov.l @%0+, r15 \n"
831 "lds.l @%0+, pr \n"
832 : : "r" (addr) : "r0" /* only! */
836 /*---------------------------------------------------------------------------
837 * Put core in a power-saving state if waking list wasn't repopulated.
838 *---------------------------------------------------------------------------
840 static inline void core_sleep(struct thread_entry **waking)
842 asm volatile (
843 "mov %2, r1 \n" /* Disable interrupts */
844 "ldc r1, sr \n"
845 "mov.l @%1, r1 \n" /* Check *waking */
846 "tst r1, r1 \n"
847 "bf 1f \n" /* *waking != NULL ? exit */
848 "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
849 "mov #0, r1 \n" /* Enable interrupts */
850 "ldc r1, sr \n" /* Following instruction cannot be interrupted */
851 "bra 2f \n" /* bra and sleep are executed at once */
852 "sleep \n" /* Execute standby */
853 "1: \n"
854 "mov #0, r1 \n" /* Enable interrupts */
855 "ldc r1, sr \n"
856 "2: \n"
858 : "z"(&SBYCR-GBR), "r"(waking), "i"(HIGHEST_IRQ_LEVEL)
859 : "r1");
862 #endif /* CONFIG_CPU == */
865 * End Processor-specific section
866 ***************************************************************************/
868 #if THREAD_EXTRA_CHECKS
869 static void thread_panicf(const char *msg, struct thread_entry *thread)
871 #if NUM_CORES > 1
872 const unsigned int core = thread->core;
873 #endif
874 static char name[32];
875 thread_get_name(name, 32, thread);
876 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
878 static void thread_stkov(struct thread_entry *thread)
880 thread_panicf("Stkov", thread);
882 #define THREAD_PANICF(msg, thread) \
883 thread_panicf(msg, thread)
884 #define THREAD_ASSERT(exp, msg, thread) \
885 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
886 #else
887 static void thread_stkov(struct thread_entry *thread)
889 #if NUM_CORES > 1
890 const unsigned int core = thread->core;
891 #endif
892 static char name[32];
893 thread_get_name(name, 32, thread);
894 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
896 #define THREAD_PANICF(msg, thread)
897 #define THREAD_ASSERT(exp, msg, thread)
898 #endif /* THREAD_EXTRA_CHECKS */
900 /*---------------------------------------------------------------------------
901 * Lock a list pointer and return its value
902 *---------------------------------------------------------------------------
904 #if CONFIG_CORELOCK == SW_CORELOCK
905 /* Separate locking function versions */
907 /* Thread locking */
908 #define GET_THREAD_STATE(thread) \
909 ({ corelock_lock(&(thread)->cl); (thread)->state; })
910 #define TRY_GET_THREAD_STATE(thread) \
911 ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
912 #define UNLOCK_THREAD(thread, state) \
913 ({ corelock_unlock(&(thread)->cl); })
914 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
915 ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })
917 /* List locking */
918 #define LOCK_LIST(tqp) \
919 ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
920 #define UNLOCK_LIST(tqp, mod) \
921 ({ corelock_unlock(&(tqp)->cl); })
922 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
923 ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })
925 /* Select the queue pointer directly */
926 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
927 ({ add_to_list_l(&(tqp)->queue, (thread)); })
928 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
929 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
931 #elif CONFIG_CORELOCK == CORELOCK_SWAP
932 /* Native swap/exchange versions */
934 /* Thread locking */
935 #define GET_THREAD_STATE(thread) \
936 ({ unsigned _s; \
937 while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
938 _s; })
939 #define TRY_GET_THREAD_STATE(thread) \
940 ({ xchg8(&(thread)->state, STATE_BUSY); })
941 #define UNLOCK_THREAD(thread, _state) \
942 ({ (thread)->state = (_state); })
943 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
944 ({ (thread)->state = (_state); })
946 /* List locking */
947 #define LOCK_LIST(tqp) \
948 ({ struct thread_entry *_l; \
949 while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
950 _l; })
951 #define UNLOCK_LIST(tqp, mod) \
952 ({ (tqp)->queue = (mod); })
953 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
954 ({ (tqp)->queue = (mod); })
956 /* Select the local queue pointer copy returned from LOCK_LIST */
957 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
958 ({ add_to_list_l(&(tc), (thread)); })
959 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
960 ({ remove_from_list_l(&(tc), (thread)); })
962 #else
963 /* Single-core/non-locked versions */
965 /* Threads */
966 #define GET_THREAD_STATE(thread) \
967 ({ (thread)->state; })
968 #define UNLOCK_THREAD(thread, _state)
969 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
970 ({ (thread)->state = (_state); })
972 /* Lists */
973 #define LOCK_LIST(tqp) \
974 ({ (tqp)->queue; })
975 #define UNLOCK_LIST(tqp, mod)
976 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
977 ({ (tqp)->queue = (mod); })
979 /* Select the queue pointer directly */
980 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
981 ({ add_to_list_l(&(tqp)->queue, (thread)); })
982 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
983 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
985 #endif /* locking selection */
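/* A minimal usage sketch of the macros above (illustrative only, not
 * compiled): slot and list locks are always taken and released in matched
 * pairs around the access, as in check_tmo_threads() below. "curr" is a
 * hypothetical thread pointer. */
#if 0
    unsigned state = GET_THREAD_STATE(curr);  /* lock the slot, read state */
    if (state < TIMEOUT_STATE_FIRST)
        remove_from_list_tmo(curr);           /* operate while holding it  */
    UNLOCK_THREAD(curr, state);               /* release the slot again    */
#endif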
987 #if THREAD_EXTRA_CHECKS
988 /*---------------------------------------------------------------------------
989 * Lock the thread slot to obtain the state and then unlock it. Waits for
990 * it not to be busy. Used for debugging.
991 *---------------------------------------------------------------------------
993 static unsigned peek_thread_state(struct thread_entry *thread)
995 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
996 unsigned state = GET_THREAD_STATE(thread);
997 UNLOCK_THREAD(thread, state);
998 set_irq_level(oldlevel);
999 return state;
1001 #endif /* THREAD_EXTRA_CHECKS */
1003 /*---------------------------------------------------------------------------
1004 * Adds a thread to a list of threads using "insert last". Uses the "l"
1005 * links.
1006 *---------------------------------------------------------------------------
1008 static void add_to_list_l(struct thread_entry **list,
1009 struct thread_entry *thread)
1011 struct thread_entry *l = *list;
1013 if (l == NULL)
1015 /* Insert into unoccupied list */
1016 thread->l.next = thread;
1017 thread->l.prev = thread;
1018 *list = thread;
1019 return;
1022 /* Insert last */
1023 thread->l.next = l;
1024 thread->l.prev = l->l.prev;
1025 thread->l.prev->l.next = thread;
1026 l->l.prev = thread;
1028 /* Insert next
1029 thread->l.next = l->l.next;
1030 thread->l.prev = l;
1031 thread->l.next->l.prev = thread;
1032 l->l.next = thread;
1036 /*---------------------------------------------------------------------------
1037 * Locks a list, adds the thread entry and unlocks the list on multicore.
1038 * Defined as add_to_list_l on single-core.
1039 *---------------------------------------------------------------------------
1041 #if NUM_CORES > 1
1042 static void add_to_list_l_locked(struct thread_queue *tq,
1043 struct thread_entry *thread)
1045 struct thread_entry *t = LOCK_LIST(tq);
1046 ADD_TO_LIST_L_SELECT(t, tq, thread);
1047 UNLOCK_LIST(tq, t);
1048 (void)t;
1050 #else
1051 #define add_to_list_l_locked(tq, thread) \
1052 add_to_list_l(&(tq)->queue, (thread))
1053 #endif
1055 /*---------------------------------------------------------------------------
1056 * Removes a thread from a list of threads. Uses the "l" links.
1057 *---------------------------------------------------------------------------
1059 static void remove_from_list_l(struct thread_entry **list,
1060 struct thread_entry *thread)
1062 struct thread_entry *prev, *next;
1064 next = thread->l.next;
1066 if (thread == next)
1068 /* The only item */
1069 *list = NULL;
1070 return;
1073 if (thread == *list)
1075 /* List becomes next item */
1076 *list = next;
1079 prev = thread->l.prev;
1081 /* Fix links to jump over the removed entry. */
1082 prev->l.next = next;
1083 next->l.prev = prev;
1086 /*---------------------------------------------------------------------------
1087 * Locks a list, removes the thread entry and unlocks the list on multicore.
1088 * Defined as remove_from_list_l on single-core.
1089 *---------------------------------------------------------------------------
1091 #if NUM_CORES > 1
1092 static void remove_from_list_l_locked(struct thread_queue *tq,
1093 struct thread_entry *thread)
1095 struct thread_entry *t = LOCK_LIST(tq);
1096 REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
1097 UNLOCK_LIST(tq, t);
1098 (void)t;
1100 #else
1101 #define remove_from_list_l_locked(tq, thread) \
1102 remove_from_list_l(&(tq)->queue, (thread))
1103 #endif
1105 /*---------------------------------------------------------------------------
1106 * Add a thread to the core's timeout list by linking the pointers in its
1107 * tmo structure.
1108 *---------------------------------------------------------------------------
1110 static void add_to_list_tmo(struct thread_entry *thread)
1112 /* Insert first */
1113 struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;
1115 thread->tmo.prev = thread;
1116 thread->tmo.next = t;
1118 if (t != NULL)
1120 /* Fix second item's prev pointer to point to this thread */
1121 t->tmo.prev = thread;
1124 cores[IF_COP_CORE(thread->core)].timeout = thread;
1127 /*---------------------------------------------------------------------------
1128 * Remove a thread from the core's timeout list by unlinking the pointers in
1129 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
1130 * is cancelled.
1131 *---------------------------------------------------------------------------
1133 static void remove_from_list_tmo(struct thread_entry *thread)
1135 struct thread_entry *next = thread->tmo.next;
1136 struct thread_entry *prev;
1138 if (thread == cores[IF_COP_CORE(thread->core)].timeout)
1140 /* Next item becomes list head */
1141 cores[IF_COP_CORE(thread->core)].timeout = next;
1143 if (next != NULL)
1145 /* Fix new list head's prev to point to itself. */
1146 next->tmo.prev = next;
1149 thread->tmo.prev = NULL;
1150 return;
1153 prev = thread->tmo.prev;
1155 if (next != NULL)
1157 next->tmo.prev = prev;
1160 prev->tmo.next = next;
1161 thread->tmo.prev = NULL;
1164 /*---------------------------------------------------------------------------
1165 * Schedules a thread wakeup on the specified core. Threads will be made
1166 * ready to run when the next task switch occurs. Note that this does not
1167 * introduce an on-core delay since the soonest the next thread may run is
1168 * no sooner than that. Other cores and on-core interrupts may only ever
1169 * add to the list.
1170 *---------------------------------------------------------------------------
1172 static void core_schedule_wakeup(struct thread_entry *thread)
1174 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1175 const unsigned int core = IF_COP_CORE(thread->core);
1176 add_to_list_l_locked(&cores[core].waking, thread);
1177 #if NUM_CORES > 1
1178 if (core != CURRENT_CORE)
1180 core_wake(core);
1182 #endif
1183 set_irq_level(oldlevel);
1186 /*---------------------------------------------------------------------------
1187 * If the waking list was populated, move all threads on it onto the running
1188 * list so they may be run ASAP.
1189 *---------------------------------------------------------------------------
1191 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
1193 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1194 struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
1195 struct thread_entry *r = cores[IF_COP_CORE(core)].running;
1197 /* Transfer all threads on waking list to running list in one
1198 swoop */
1199 if (r != NULL)
1201 /* Place waking threads at the end of the running list. */
1202 struct thread_entry *tmp;
1203 w->l.prev->l.next = r;
1204 r->l.prev->l.next = w;
1205 tmp = r->l.prev;
1206 r->l.prev = w->l.prev;
1207 w->l.prev = tmp;
1209 else
1211 /* Just transfer the list as-is */
1212 cores[IF_COP_CORE(core)].running = w;
1214 /* Just leave any timeout threads on the timeout list. If a timeout check
1215 * is due, they will be removed there. If they do a timeout again before
1216 * being removed, they will just stay on the list with a new expiration
1217 * tick. */
1219 /* Waking list is clear - NULL and unlock it */
1220 UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
1221 set_irq_level(oldlevel);
1224 /*---------------------------------------------------------------------------
1225 * Check the core's timeout list when at least one thread is due to wake.
1226 * Filtering for the condition is done before making the call. Resets the
1227 * tick at which the next check will occur.
1228 *---------------------------------------------------------------------------
1230 static void check_tmo_threads(void)
1232 const unsigned int core = CURRENT_CORE;
1233 const long tick = current_tick; /* snapshot the current tick */
1234 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1235 struct thread_entry *next = cores[core].timeout;
1237 /* If there are no processes waiting for a timeout, just keep the check
1238 tick from falling into the past. */
1239 if (next != NULL)
1241 /* Check sleeping threads. */
1242 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1246 /* Must make sure no one else is examining the state, wait until
1247 slot is no longer busy */
1248 struct thread_entry *curr = next;
1249 next = curr->tmo.next;
1251 unsigned state = GET_THREAD_STATE(curr);
1253 if (state < TIMEOUT_STATE_FIRST)
1255 /* Cleanup threads no longer on a timeout but still on the
1256 * list. */
1257 remove_from_list_tmo(curr);
1258 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1260 else if (TIME_BEFORE(tick, curr->tmo_tick))
1262 /* Timeout still pending - this will be the usual case */
1263 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1265 /* Earliest timeout found so far - move the next check up
1266 to its time */
1267 next_tmo_check = curr->tmo_tick;
1269 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1271 else
1273 /* Sleep timeout has been reached so bring the thread back to
1274 * life again. */
1275 if (state == STATE_BLOCKED_W_TMO)
1277 remove_from_list_l_locked(curr->bqp, curr);
1280 remove_from_list_tmo(curr);
1281 add_to_list_l(&cores[core].running, curr);
1282 UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
1285 /* Break the loop once we have walked through the list of all
1286 * sleeping processes or have removed them all. */
1288 while (next != NULL);
1290 set_irq_level(oldlevel);
1293 cores[core].next_tmo_check = next_tmo_check;
1296 /*---------------------------------------------------------------------------
1297 * Performs operations that must be done before blocking a thread but after
1298 * the state is saved - follows reverse of locking order. blk_ops.flags is
1299 * assumed to be nonzero.
1300 *---------------------------------------------------------------------------
1302 static inline void run_blocking_ops(
1303 IF_COP_VOID(unsigned int core, struct thread_entry *thread))
1305 #if NUM_CORES > 1
1306 struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
1307 const unsigned flags = ops->flags;
1309 if (flags == 0)
1310 return;
1312 if (flags & TBOP_SWITCH_CORE)
1314 core_switch_blk_op(core, thread);
1317 #if CONFIG_CORELOCK == SW_CORELOCK
1318 if (flags & TBOP_UNLOCK_LIST)
1320 UNLOCK_LIST(ops->list_p, NULL);
1323 if (flags & TBOP_UNLOCK_CORELOCK)
1325 corelock_unlock(ops->cl_p);
1328 if (flags & TBOP_UNLOCK_THREAD)
1330 UNLOCK_THREAD(ops->thread, 0);
1332 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1333 /* Write updated variable value into memory location */
1334 switch (flags & TBOP_VAR_TYPE_MASK)
1336 case TBOP_UNLOCK_LIST:
1337 UNLOCK_LIST(ops->list_p, ops->list_v);
1338 break;
1339 case TBOP_SET_VARi:
1340 *ops->var_ip = ops->var_iv;
1341 break;
1342 case TBOP_SET_VARu8:
1343 *ops->var_u8p = ops->var_u8v;
1344 break;
1346 #endif /* CONFIG_CORELOCK == */
1348 /* Unlock thread's slot */
1349 if (flags & TBOP_UNLOCK_CURRENT)
1351 UNLOCK_THREAD(thread, ops->state);
1354 /* Reset the IRQ level */
1355 if (flags & TBOP_IRQ_LEVEL)
1357 set_irq_level(ops->irq_level);
1360 ops->flags = 0;
1361 #else
1362 int level = cores[CURRENT_CORE].irq_level;
1363 if (level == STAY_IRQ_LEVEL)
1364 return;
1366 cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL;
1367 set_irq_level(level);
1368 #endif /* NUM_CORES */
1372 /*---------------------------------------------------------------------------
1373 * Runs any operations that may cause threads to be ready to run and then
1374 * sleeps the processor core until the next interrupt if none are.
1375 *---------------------------------------------------------------------------
1377 static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
1379 for (;;)
1381 /* We want to do these ASAP as they may change the decision to sleep
1382 * the core, or the core may have woken because an interrupt occurred
1383 * and posted a message to a queue. */
1384 if (cores[IF_COP_CORE(core)].waking.queue != NULL)
1386 core_perform_wakeup(IF_COP(core));
1389 /* If there are threads on a timeout and the earliest wakeup is due,
1390 * check the list and wake any threads that need to start running
1391 * again. */
1392 if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
1394 check_tmo_threads();
1397 /* If there is a ready to run task, return its ID and keep core
1398 * awake. */
1399 if (cores[IF_COP_CORE(core)].running != NULL)
1401 return cores[IF_COP_CORE(core)].running;
1404 /* Enter sleep mode to reduce power usage - woken up on interrupt or
1405 * wakeup request from another core. May abort if the waking list
1406 * became populated (again). See beginning of this file for the
1407 * algorithm to atomically determine this. */
1408 core_sleep(IF_COP(core, ) &cores[IF_COP_CORE(core)].waking.queue);
1412 #ifdef RB_PROFILE
1413 void profile_thread(void)
1415 profstart(cores[CURRENT_CORE].running - threads);
1417 #endif
1419 /*---------------------------------------------------------------------------
1420 * Prepares a thread to block on an object's list and/or for a specified
1421 * duration - expects object and slot to be appropriately locked if needed.
1422 *---------------------------------------------------------------------------
1424 static inline void _block_thread_on_l(struct thread_queue *list,
1425 struct thread_entry *thread,
1426 unsigned state
1427 IF_SWCL(, const bool nolock))
1429 /* If inlined, unreachable branches will be pruned with no size penalty
1430 because constant params are used for state and nolock. */
1431 const unsigned int core = IF_COP_CORE(thread->core);
1433 /* Remove the thread from the list of running threads. */
1434 remove_from_list_l(&cores[core].running, thread);
1436 /* Add a timeout to the block if not infinite */
1437 switch (state)
1439 case STATE_BLOCKED:
1440 /* Put the thread into a new list of inactive threads. */
1441 #if CONFIG_CORELOCK == SW_CORELOCK
1442 if (nolock)
1444 thread->bqp = NULL; /* Indicate nolock list */
1445 thread->bqnlp = (struct thread_entry **)list;
1446 add_to_list_l((struct thread_entry **)list, thread);
1448 else
1449 #endif
1451 thread->bqp = list;
1452 add_to_list_l_locked(list, thread);
1454 break;
1455 case STATE_BLOCKED_W_TMO:
1456 /* Put the thread into a new list of inactive threads. */
1457 #if CONFIG_CORELOCK == SW_CORELOCK
1458 if (nolock)
1460 thread->bqp = NULL; /* Indicate nolock list */
1461 thread->bqnlp = (struct thread_entry **)list;
1462 add_to_list_l((struct thread_entry **)list, thread);
1464 else
1465 #endif
1467 thread->bqp = list;
1468 add_to_list_l_locked(list, thread);
1470 /* Fall-through */
1471 case STATE_SLEEPING:
1472 /* If this thread times out sooner than any other thread, update
1473 next_tmo_check to its timeout */
1474 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1476 cores[core].next_tmo_check = thread->tmo_tick;
1479 if (thread->tmo.prev == NULL)
1481 add_to_list_tmo(thread);
1483 /* else thread was never removed from list - just keep it there */
1484 break;
1487 #ifdef HAVE_PRIORITY_SCHEDULING
1488 /* Reset priorities */
1489 if (thread->priority == cores[core].highest_priority)
1490 cores[core].highest_priority = LOWEST_PRIORITY;
1491 #endif
1493 #if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
1494 /* Safe to set state now */
1495 thread->state = state;
1496 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1497 cores[core].blk_ops.state = state;
1498 #endif
1500 #if NUM_CORES > 1
1501 /* Delay slot unlock until task switch */
1502 cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
1503 #endif
1506 static inline void block_thread_on_l(
1507 struct thread_queue *list, struct thread_entry *thread, unsigned state)
1509 _block_thread_on_l(list, thread, state IF_SWCL(, false));
1512 static inline void block_thread_on_l_no_listlock(
1513 struct thread_entry **list, struct thread_entry *thread, unsigned state)
1515 _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
1518 /*---------------------------------------------------------------------------
1519 * Switch thread in round robin fashion for any given priority. Any thread
1520 * that removed itself from the running list first must specify itself in
1521 * the parameter.
1523 * INTERNAL: Intended for use by kernel and not for programs.
1524 *---------------------------------------------------------------------------
1526 void switch_thread(struct thread_entry *old)
1528 const unsigned int core = CURRENT_CORE;
1529 struct thread_entry *thread = cores[core].running;
1531 if (old == NULL)
1533 /* Move to next thread */
1534 old = thread;
1535 cores[core].running = old->l.next;
1537 /* else running list is already at next thread */
1539 #ifdef RB_PROFILE
1540 profile_thread_stopped(old - threads);
1541 #endif
1543 /* Begin task switching by saving our current context so that we can
1544 * restore the state of the current thread later to the point prior
1545 * to this call. */
1546 store_context(&old->context);
1548 /* Check if the current thread's stack has overflowed */
1549 if(((unsigned int *)old->stack)[0] != DEADBEEF)
1550 thread_stkov(old);
1552 /* Run any blocking operations requested before switching/sleeping */
1553 run_blocking_ops(IF_COP(core, old));
1555 /* Go through the list of sleeping tasks to check if we need to wake up
1556 * any of them due to timeout. Also puts core into sleep state until
1557 * there is at least one running process again. */
1558 thread = sleep_core(IF_COP(core));
1560 #ifdef HAVE_PRIORITY_SCHEDULING
1561 /* Select the new task based on priorities and the last time a process
1562 * got CPU time. */
1563 for (;;)
1565 int priority = MIN(thread->priority, thread->priority_x);
1567 if (priority < cores[core].highest_priority)
1568 cores[core].highest_priority = priority;
1570 if (priority == cores[core].highest_priority ||
1571 (current_tick - thread->last_run > priority * 8))
1573 cores[core].running = thread;
1574 break;
1577 thread = thread->l.next;
1580 /* Reset the value of thread's last running time to the current time. */
1581 thread->last_run = current_tick;
1582 #endif /* HAVE_PRIORITY_SCHEDULING */
1584 /* And finally give control to the next thread. */
1585 load_context(&thread->context);
1587 #ifdef RB_PROFILE
1588 profile_thread_started(thread - threads);
1589 #endif
1592 /*---------------------------------------------------------------------------
1593 * Removes the boost flag from a thread and unboosts the CPU if the count
1594 * of boosted threads reaches zero. Requires thread slot to be locked first.
1595 *---------------------------------------------------------------------------
1597 static inline void unboost_thread(struct thread_entry *thread)
1599 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1600 if (thread->boosted != 0)
1602 thread->boosted = 0;
1603 if (--boosted_threads == 0)
1605 cpu_boost(false);
1608 #endif
1609 (void)thread;
1612 /*---------------------------------------------------------------------------
1613 * Sleeps a thread for a specified number of ticks and unboosts the thread
1614 * if it is boosted. If ticks is zero, it does not delay but instead switches
1615 * tasks.
1617 * INTERNAL: Intended for use by kernel and not for programs.
1618 *---------------------------------------------------------------------------
1620 void sleep_thread(int ticks)
1622 /* Get the entry for the current running thread. */
1623 struct thread_entry *current = cores[CURRENT_CORE].running;
1625 #if NUM_CORES > 1
1626 /* Lock thread slot */
1627 GET_THREAD_STATE(current);
1628 #endif
1630 /* Remove our boosted status if any */
1631 unboost_thread(current);
1633 /* Set our timeout, change lists, and finally switch threads.
1634 * Unlock during switch on multicore. */
1635 current->tmo_tick = current_tick + ticks + 1;
1636 block_thread_on_l(NULL, current, STATE_SLEEPING);
1637 switch_thread(current);
1639 /* Our status should be STATE_RUNNING */
1640 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1641 "S:R->!*R", current);
1644 /*---------------------------------------------------------------------------
1645 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1646 * Caller with interrupt-accessible lists should disable interrupts first
1647 * and request a TBOP_IRQ_LEVEL blocking operation to reset it.
1649 * INTERNAL: Intended for use by kernel objects and not for programs.
1650 *---------------------------------------------------------------------------
1652 IF_SWCL(static inline) void _block_thread(struct thread_queue *list
1653 IF_SWCL(, const bool nolock))
1655 /* Get the entry for the current running thread. */
1656 struct thread_entry *current = cores[CURRENT_CORE].running;
1658 /* Set the state to blocked and ask the scheduler to switch tasks,
1659 * this takes us off of the run queue until we are explicitly woken */
1661 #if NUM_CORES > 1
1662 /* Lock thread slot */
1663 GET_THREAD_STATE(current);
1664 #endif
1666 #if CONFIG_CORELOCK == SW_CORELOCK
1667 /* One branch optimized away during inlining */
1668 if (nolock)
1670 block_thread_on_l_no_listlock((struct thread_entry **)list,
1671 current, STATE_BLOCKED);
1673 else
1674 #endif
1676 block_thread_on_l(list, current, STATE_BLOCKED);
1679 switch_thread(current);
1681 /* Our status should be STATE_RUNNING */
1682 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1683 "B:R->!*R", current);
1686 #if CONFIG_CORELOCK == SW_CORELOCK
1687 /* Inline lock/nolock version of _block_thread into these functions */
1688 void block_thread(struct thread_queue *tq)
1690 _block_thread(tq, false);
1693 void block_thread_no_listlock(struct thread_entry **list)
1695 _block_thread((struct thread_queue *)list, true);
1697 #endif /* CONFIG_CORELOCK */
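/* Illustrative sketch only (not part of the original file): the intended
 * pairing of block_thread() and wakeup_thread() by a kernel object that
 * owns a struct thread_queue. "demo_q" and the helper names are made up;
 * demo_q is assumed to have been set up with thread_queue_init(), and a
 * real object must also provide the serialization and interrupt handling
 * described in the comments above.
 *
 *   static struct thread_queue demo_q;
 *
 *   static void demo_wait(void)    // consumer side
 *   {
 *       block_thread(&demo_q);     // sleep until demo_post() wakes us
 *   }
 *
 *   static void demo_post(void)    // producer side
 *   {
 *       wakeup_thread(&demo_q);    // move one blocked thread back to running
 *   }
 */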
1699 /*---------------------------------------------------------------------------
1700 * Block a thread on a blocking queue for a specified time interval or until
1701 * explicitly woken - whichever happens first.
1702 * Caller with interrupt-accessible lists should disable interrupts first
1703 * and request that interrupt level be restored after switching out the
1704 * current thread.
1706 * INTERNAL: Intended for use by kernel objects and not for programs.
1707 *---------------------------------------------------------------------------
1709 void block_thread_w_tmo(struct thread_queue *list, int timeout)
1711 /* Get the entry for the current running thread. */
1712 struct thread_entry *current = cores[CURRENT_CORE].running;
1714 #if NUM_CORES > 1
1715 /* Lock thread slot */
1716 GET_THREAD_STATE(current);
1717 #endif
1719 /* A block with a timeout is a sleep situation; whatever we are waiting
1720 * for _may or may not_ happen (user input, for instance), regardless of
1721 * boost state, so this thread no longer needs to boost */
1722 unboost_thread(current);
1724 /* Set the state to blocked with the specified timeout */
1725 current->tmo_tick = current_tick + timeout;
1726 /* Set the list for explicit wakeup */
1727 block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);
1729 /* Now force a task switch and block until we have been woken up
1730 * by another thread or timeout is reached - whichever happens first */
1731 switch_thread(current);
1733 /* Our status should be STATE_RUNNING */
1734 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1735 "T:R->!*R", current);
1738 /*---------------------------------------------------------------------------
1739 * Explicitly wakeup a thread on a blocking queue. Has no effect on threads
1740 * that called sleep().
1741 * Caller with interrupt-accessible lists should disable interrupts first.
1742 * This code should be considered a critical section by the caller.
1744 * INTERNAL: Intended for use by kernel objects and not for programs.
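 * Returns the woken thread, NULL if the list was empty or the thread was not
 * in a blocked state, or THREAD_WAKEUP_MISSING if the thread disappeared
 * from the list while its slot was being locked (multicore builds only).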
1745 *---------------------------------------------------------------------------
1747 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
1748 struct thread_queue *list IF_SWCL(, const bool nolock))
1750 struct thread_entry *t;
1751 struct thread_entry *thread;
1752 unsigned state;
1754 /* Wake up the last thread first. */
1755 #if CONFIG_CORELOCK == SW_CORELOCK
1756 /* One branch optimized away during inlining */
1757 if (nolock)
1759 t = list->queue;
1761 else
1762 #endif
1764 t = LOCK_LIST(list);
1767 /* Check if there is a blocked thread at all. */
1768 if (t == NULL)
1770 #if CONFIG_CORELOCK == SW_CORELOCK
1771 if (!nolock)
1772 #endif
1774 UNLOCK_LIST(list, NULL);
1776 return NULL;
1779 thread = t;
1781 #if NUM_CORES > 1
1782 #if CONFIG_CORELOCK == SW_CORELOCK
1783 if (nolock)
1785 /* Lock thread only, not list */
1786 state = GET_THREAD_STATE(thread);
1788 else
1789 #endif
1791 /* This locks in reverse order from other routines so a retry in the
1792 correct order may be needed */
1793 state = TRY_GET_THREAD_STATE(thread);
1794 if (state == STATE_BUSY)
1796 /* Unlock list and retry slot, then list */
1797 UNLOCK_LIST(list, t);
1798 state = GET_THREAD_STATE(thread);
1799 t = LOCK_LIST(list);
1800 /* Be sure thread still exists here - it couldn't have re-added
1801 itself if it was woken elsewhere because this function is
1802 serialized within the object that owns the list. */
1803 if (thread != t)
1805 /* Thread disappeared :( */
1806 UNLOCK_LIST(list, t);
1807 UNLOCK_THREAD(thread, state);
1808 return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
1812 #else /* NUM_CORES == 1 */
1813 state = GET_THREAD_STATE(thread);
1814 #endif /* NUM_CORES */
1816 /* Determine thread's current state. */
1817 switch (state)
1819 case STATE_BLOCKED:
1820 case STATE_BLOCKED_W_TMO:
1821 /* Remove thread from object's blocked list - select t or list depending
1822 on locking type at compile time */
1823 REMOVE_FROM_LIST_L_SELECT(t, list, thread);
1824 #if CONFIG_CORELOCK == SW_CORELOCK
1825 /* Statement optimized away during inlining if nolock != false */
1826 if (!nolock)
1827 #endif
1829 UNLOCK_LIST(list, t); /* Unlock list - removal complete */
1832 #ifdef HAVE_PRIORITY_SCHEDULING
1833 /* Give the task a kick to avoid a stall after wakeup.
1834 Not really proper treatment - TODO later. */
1835 thread->last_run = current_tick - 8*LOWEST_PRIORITY;
1836 #endif
1837 core_schedule_wakeup(thread);
1838 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
1839 return thread;
1840 default:
1841 /* Nothing to do. State is not blocked. */
1842 #if THREAD_EXTRA_CHECKS
1843 THREAD_PANICF("wakeup_thread->block invalid", thread);
1844 case STATE_RUNNING:
1845 case STATE_KILLED:
1846 #endif
1847 #if CONFIG_CORELOCK == SW_CORELOCK
1848 /* Statement optimized away during inlining if nolock != false */
1849 if (!nolock)
1850 #endif
1852 UNLOCK_LIST(list, t); /* Unlock the object's list */
1854 UNLOCK_THREAD(thread, state); /* Unlock thread slot */
1855 return NULL;
1859 #if CONFIG_CORELOCK == SW_CORELOCK
1860 /* Inline lock/nolock version of _wakeup_thread into these functions */
1861 struct thread_entry * wakeup_thread(struct thread_queue *tq)
1863 return _wakeup_thread(tq, false);
1866 struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
1868 return _wakeup_thread((struct thread_queue *)list, true);
1870 #endif /* CONFIG_CORELOCK */
1872 /*---------------------------------------------------------------------------
1873 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
1874 * will be locked on multicore.
1875 *---------------------------------------------------------------------------
1877 static int find_empty_thread_slot(void)
1879 #if NUM_CORES > 1
1880 /* Any slot could be on an IRQ-accessible list */
1881 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1882 #endif
1883 /* Thread slots are not locked on single core */
1885 int n;
1887 for (n = 0; n < MAXTHREADS; n++)
1889 /* Obtain current slot state - lock it on multicore */
1890 unsigned state = GET_THREAD_STATE(&threads[n]);
1892 if (state == STATE_KILLED
1893 #if NUM_CORES > 1
1894 && threads[n].name != THREAD_DESTRUCT
1895 #endif
1898 /* Slot is empty - leave it locked and caller will unlock */
1899 break;
1902 /* Finished examining slot - no longer busy - unlock on multicore */
1903 UNLOCK_THREAD(&threads[n], state);
1906 #if NUM_CORES > 1
1907 set_irq_level(oldlevel); /* Re-enable interrupts - this slot is
1908 not accessible to them yet */
1909 #endif
1911 return n;
1915 /*---------------------------------------------------------------------------
1916 * Place the current core in idle mode - woken up on interrupt or wake
1917 * request from another core.
1918 *---------------------------------------------------------------------------
1920 void core_idle(void)
1922 const unsigned int core = CURRENT_CORE;
1923 core_sleep(IF_COP(core,) &cores[core].waking.queue);
1926 /*---------------------------------------------------------------------------
1927 * Create a thread
1928 * If using a dual core architecture, specify which core to start the thread
1929 * on, and whether to fall back to the other core if it can't be created.
1930 * Returns its ID if the context area could be allocated, else NULL.
1931 *---------------------------------------------------------------------------
1933 struct thread_entry*
1934 create_thread(void (*function)(void), void* stack, int stack_size,
1935 unsigned flags, const char *name
1936 IF_PRIO(, int priority)
1937 IF_COP(, unsigned int core))
1939 unsigned int i;
1940 unsigned int stacklen;
1941 unsigned int *stackptr;
1942 int slot;
1943 struct thread_entry *thread;
1944 unsigned state;
1946 slot = find_empty_thread_slot();
1947 if (slot >= MAXTHREADS)
1949 return NULL;
1952 /* Munge the stack to make it easy to spot stack overflows */
1953 stacklen = stack_size / sizeof(int);
1954 stackptr = stack;
1955 for(i = 0;i < stacklen;i++)
1957 stackptr[i] = DEADBEEF;
1960 /* Store interesting information */
1961 thread = &threads[slot];
1962 thread->name = name;
1963 thread->stack = stack;
1964 thread->stack_size = stack_size;
1965 thread->bqp = NULL;
1966 #if CONFIG_CORELOCK == SW_CORELOCK
1967 thread->bqnlp = NULL;
1968 #endif
1969 thread->queue = NULL;
1970 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1971 thread->boosted = 0;
1972 #endif
1973 #ifdef HAVE_PRIORITY_SCHEDULING
1974 thread->priority_x = LOWEST_PRIORITY;
1975 thread->priority = priority;
1976 thread->last_run = current_tick - priority * 8;
1977 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
1978 #endif
1980 #if NUM_CORES > 1
1981 thread->core = core;
1983 /* Writeback stack munging or anything else before starting */
1984 if (core != CURRENT_CORE)
1986 flush_icache();
1988 #endif
1990 /* The thread is not on any timeout list, but be a bit paranoid */
1991 thread->tmo.prev = NULL;
1993 state = (flags & CREATE_THREAD_FROZEN) ?
1994 STATE_FROZEN : STATE_RUNNING;
1996 /* Align stack to an even 32 bit boundary */
1997 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
1999 /* Load the thread's context structure with needed startup information */
2000 THREAD_STARTUP_INIT(core, thread, function);
2002 if (state == STATE_RUNNING)
2004 #if NUM_CORES > 1
2005 if (core != CURRENT_CORE)
2007 /* Next task switch on other core moves thread to running list */
2008 core_schedule_wakeup(thread);
2010 else
2011 #endif
2013 /* Place on running list immediately */
2014 add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
2018 /* remove lock and set state */
2019 UNLOCK_THREAD_SET_STATE(thread, state);
2021 return thread;
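/* Illustrative sketch only (not part of the original file): creating a simple
 * background thread. "demo_stack"/"demo_main"/"demo_init" are hypothetical
 * names, and the IF_PRIO/IF_COP arguments are only present on builds with
 * priority scheduling and multiple cores respectively.
 *
 *   static long demo_stack[0x400 / sizeof(long)];
 *
 *   static void demo_main(void)
 *   {
 *       for (;;)
 *           sleep_thread(HZ);
 *   }
 *
 *   void demo_init(void)
 *   {
 *       struct thread_entry *t =
 *           create_thread(demo_main, demo_stack, sizeof(demo_stack), 0,
 *                         "demo" IF_PRIO(, PRIORITY_USER_INTERFACE)
 *                         IF_COP(, CPU));
 *       if (t == NULL)
 *           panicf("demo thread not created");  // hypothetical handling
 *   }
 */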
2024 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2025 void trigger_cpu_boost(void)
2027 /* No IRQ disable necessary since the current thread cannot be blocked
2028 on an IRQ-accessible list */
2029 struct thread_entry *current = cores[CURRENT_CORE].running;
2030 unsigned state;
2032 state = GET_THREAD_STATE(current);
2034 if (current->boosted == 0)
2036 current->boosted = 1;
2037 if (++boosted_threads == 1)
2039 cpu_boost(true);
2043 UNLOCK_THREAD(current, state);
2044 (void)state;
2046 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
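/* Illustrative sketch only (not part of the original file): the boost is
 * reference counted across threads, and the calling thread's contribution is
 * dropped automatically by unboost_thread() when it sleeps or blocks with a
 * timeout, so a typical pattern is simply:
 *
 *   trigger_cpu_boost();     // raise the clock for the work below
 *   do_heavy_work();         // hypothetical helper
 *   sleep_thread(HZ / 10);   // the boost is released on the way to sleep
 */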
2048 /*---------------------------------------------------------------------------
2049 * Remove a thread from the scheduler.
2050 * Parameter is the ID as returned from create_thread().
2052 * Use with care on threads that are not under careful control as this may
2053 * leave various objects in an undefined state. When trying to kill a thread
2054 * on another processor, be sure you know what it is doing and that it won't
2055 * be switching cores itself.
2056 *---------------------------------------------------------------------------
2058 void remove_thread(struct thread_entry *thread)
2060 #if NUM_CORES > 1
2061 /* core is not constant here because of core switching */
2062 unsigned int core = CURRENT_CORE;
2063 unsigned int old_core = NUM_CORES;
2064 #else
2065 const unsigned int core = CURRENT_CORE;
2066 #endif
2067 unsigned state;
2068 int oldlevel;
2070 if (thread == NULL)
2071 thread = cores[core].running;
2073 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2074 state = GET_THREAD_STATE(thread);
2076 if (state == STATE_KILLED)
2078 goto thread_killed;
2081 #if NUM_CORES > 1
2082 if (thread->core != core)
2084 /* Switch cores and safely extract the thread there */
2085 /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
2086 condition if the thread runs away to another processor. */
2087 unsigned int new_core = thread->core;
2088 const char *old_name = thread->name;
2090 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2091 UNLOCK_THREAD(thread, state);
2092 set_irq_level(oldlevel);
2094 old_core = switch_core(new_core);
2096 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2097 state = GET_THREAD_STATE(thread);
2099 core = new_core;
2101 if (state == STATE_KILLED)
2103 /* Thread suicided before we could kill it */
2104 goto thread_killed;
2107 /* Reopen slot - it's locked again anyway */
2108 thread->name = old_name;
2110 if (thread->core != core)
2112 /* We won't play thread tag - just forget it */
2113 UNLOCK_THREAD(thread, state);
2114 set_irq_level(oldlevel);
2115 goto thread_kill_abort;
2118 /* Perform the extraction and switch ourselves back to the original
2119 processor */
2121 #endif /* NUM_CORES > 1 */
2123 #ifdef HAVE_PRIORITY_SCHEDULING
2124 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2125 #endif
2126 if (thread->tmo.prev != NULL)
2128 /* Clean thread off the timeout list if a timeout check hasn't
2129 * run yet */
2130 remove_from_list_tmo(thread);
2133 if (thread == cores[core].running)
2135 /* Suicide - thread has unconditional rights to do this */
2136 /* Maintain locks until switch-out */
2137 #if NUM_CORES > 1
2138 cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
2139 cores[core].blk_ops.irq_level = oldlevel;
2140 #else
2141 cores[core].irq_level = oldlevel;
2142 #endif
2143 block_thread_on_l(NULL, thread, STATE_KILLED);
2145 #if NUM_CORES > 1
2146 /* Switch to the idle stack if not on the main core (where "main"
2147 * runs) */
2148 if (core != CPU)
2150 switch_to_idle_stack(core);
2153 flush_icache();
2154 #endif
2155 /* Signal this thread */
2156 thread_queue_wake_no_listlock(&thread->queue);
2157 /* Switch tasks and never return */
2158 switch_thread(thread);
2159 /* This should never and must never be reached - if it is, the
2160 * state is corrupted */
2161 THREAD_PANICF("remove_thread->K:*R", thread);
2164 #if NUM_CORES > 1
2165 if (thread->name == THREAD_DESTRUCT)
2167 /* Another core is doing this operation already */
2168 UNLOCK_THREAD(thread, state);
2169 set_irq_level(oldlevel);
2170 return;
2172 #endif
2173 if (cores[core].waking.queue != NULL)
2175 /* Get any threads off the waking list and onto the running
2176 * list first - waking and running cannot be distinguished by
2177 * state */
2178 core_perform_wakeup(IF_COP(core));
2181 switch (state)
2183 case STATE_RUNNING:
2184 /* Remove thread from ready to run tasks */
2185 remove_from_list_l(&cores[core].running, thread);
2186 break;
2187 case STATE_BLOCKED:
2188 case STATE_BLOCKED_W_TMO:
2189 /* Remove thread from the queue it's blocked on - including its
2190 * own if waiting there */
2191 #if CONFIG_CORELOCK == SW_CORELOCK
2192 /* One or the other will be valid */
2193 if (thread->bqp == NULL)
2195 remove_from_list_l(thread->bqnlp, thread);
2197 else
2198 #endif /* CONFIG_CORELOCK */
2200 remove_from_list_l_locked(thread->bqp, thread);
2202 break;
2203 /* Otherwise thread is killed or is frozen and hasn't run yet */
2206 /* If thread was waiting on itself, it will have been removed above.
2207 * The wrong order would result in waking the thread first and deadlocking
2208 * since the slot is already locked. */
2209 thread_queue_wake_no_listlock(&thread->queue);
2211 thread_killed: /* Thread was already killed */
2212 /* Removal complete - safe to unlock state and reenable interrupts */
2213 UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
2214 set_irq_level(oldlevel);
2216 #if NUM_CORES > 1
2217 thread_kill_abort: /* Something stopped us from killing the thread */
2218 if (old_core < NUM_CORES)
2220 /* Did a removal on another processor's thread - switch back to
2221 native core */
2222 switch_core(old_core);
2224 #endif
2227 /*---------------------------------------------------------------------------
2228 * Block the current thread until another thread terminates. A thread may
2229 * wait on itself to terminate, which prevents it from running again; it
2230 * will then need to be killed externally.
2231 * Parameter is the ID as returned from create_thread().
2232 *---------------------------------------------------------------------------
2234 void thread_wait(struct thread_entry *thread)
2236 const unsigned int core = CURRENT_CORE;
2237 struct thread_entry *current = cores[core].running;
2238 unsigned thread_state;
2239 #if NUM_CORES > 1
2240 int oldlevel;
2241 unsigned current_state;
2242 #endif
2244 if (thread == NULL)
2245 thread = current;
2247 #if NUM_CORES > 1
2248 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2249 #endif
2251 thread_state = GET_THREAD_STATE(thread);
2253 #if NUM_CORES > 1
2254 /* We can't lock the same slot twice. The waitee will also lock itself
2255 first then the thread slots that will be locked and woken in turn.
2256 The same order must be observed here as well. */
2257 if (thread == current)
2259 current_state = thread_state;
2261 else
2263 current_state = GET_THREAD_STATE(current);
2265 #endif
2267 if (thread_state != STATE_KILLED)
2269 #if NUM_CORES > 1
2270 cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
2271 cores[core].blk_ops.irq_level = oldlevel;
2272 #endif
2273 /* Unlock the waitee state at task switch - not done for self-wait
2274 because that would double-unlock the state and potentially
2275 corrupt another's busy assert on the slot */
2276 if (thread != current)
2278 #if CONFIG_CORELOCK == SW_CORELOCK
2279 cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
2280 cores[core].blk_ops.thread = thread;
2281 #elif CONFIG_CORELOCK == CORELOCK_SWAP
2282 cores[core].blk_ops.flags |= TBOP_SET_VARu8;
2283 cores[core].blk_ops.var_u8p = &thread->state;
2284 cores[core].blk_ops.var_u8v = thread_state;
2285 #endif
2287 block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
2288 switch_thread(current);
2289 return;
2292 /* Unlock both slots - obviously the current thread can't have
2293 STATE_KILLED so the above if clause will always catch a thread
2294 waiting on itself */
2295 #if NUM_CORES > 1
2296 UNLOCK_THREAD(current, current_state);
2297 UNLOCK_THREAD(thread, thread_state);
2298 set_irq_level(oldlevel);
2299 #endif
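/* Illustrative sketch only (not part of the original file): waiting for a
 * worker thread to finish. "worker" is a hypothetical handle previously
 * returned by create_thread(); the worker terminates itself by calling
 * remove_thread(NULL).
 *
 *   thread_wait(worker);   // blocks until the worker reaches STATE_KILLED
 */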
2302 #ifdef HAVE_PRIORITY_SCHEDULING
2303 /*---------------------------------------------------------------------------
2304 * Sets the thread's relative priority for the core it runs on.
2305 *---------------------------------------------------------------------------
2307 int thread_set_priority(struct thread_entry *thread, int priority)
2309 unsigned old_priority = (unsigned)-1;
2311 if (thread == NULL)
2312 thread = cores[CURRENT_CORE].running;
2314 #if NUM_CORES > 1
2315 /* Thread could be on any list and therefore on an interrupt accessible
2316 one - disable interrupts */
2317 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2318 #endif
2319 unsigned state = GET_THREAD_STATE(thread);
2321 /* Make sure it's not killed */
2322 if (state != STATE_KILLED)
2324 old_priority = thread->priority;
2325 thread->priority = priority;
2326 cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
2329 #if NUM_CORES > 1
2330 UNLOCK_THREAD(thread, state);
2331 set_irq_level(oldlevel);
2332 #endif
2333 return old_priority;
2336 /*---------------------------------------------------------------------------
2337 * Returns the current priority for a thread.
2338 *---------------------------------------------------------------------------
2340 int thread_get_priority(struct thread_entry *thread)
2342 /* Simple, quick probe. */
2343 if (thread == NULL)
2344 thread = cores[CURRENT_CORE].running;
2346 return (unsigned)thread->priority;
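/* Illustrative sketch only (not part of the original file): temporarily
 * raising the current thread's priority around a latency-sensitive section
 * and restoring it afterwards (passing NULL selects the running thread).
 *
 *   int old = thread_set_priority(NULL, PRIORITY_USER_INTERFACE);
 *   do_latency_sensitive_work();   // hypothetical helper
 *   thread_set_priority(NULL, old);
 */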
2349 /*---------------------------------------------------------------------------
2350 * Yield that guarantees thread execution once per round regardless of
2351 * thread's scheduler priority - basically a transient realtime boost
2352 * without altering the scheduler's thread precedence.
2354 * HACK ALERT! Search for "priority inheritance" for proper treatment.
2355 *---------------------------------------------------------------------------
2357 void priority_yield(void)
2359 const unsigned int core = CURRENT_CORE;
2360 struct thread_entry *thread = cores[core].running;
2361 thread->priority_x = HIGHEST_PRIORITY;
2362 switch_thread(NULL);
2363 thread->priority_x = LOWEST_PRIORITY;
2364 cores[core].highest_priority = LOWEST_PRIORITY;
2366 #endif /* HAVE_PRIORITY_SCHEDULING */
2368 /* Resumes a frozen thread - similar logic to wakeup_thread except that
2369 the thread is on no scheduler list at all. It exists simply by virtue of
2370 the slot having a state of STATE_FROZEN. */
2371 void thread_thaw(struct thread_entry *thread)
2373 #if NUM_CORES > 1
2374 /* Thread could be on any list and therefore on an interrupt accessible
2375 one - disable interrupts */
2376 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2377 #endif
2378 unsigned state = GET_THREAD_STATE(thread);
2380 if (state == STATE_FROZEN)
2382 const unsigned int core = CURRENT_CORE;
2383 #if NUM_CORES > 1
2384 if (thread->core != core)
2386 core_schedule_wakeup(thread);
2388 else
2389 #endif
2391 add_to_list_l(&cores[core].running, thread);
2394 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
2395 #if NUM_CORES > 1
2396 set_irq_level(oldlevel);
2397 #endif
2398 return;
2401 #if NUM_CORES > 1
2402 UNLOCK_THREAD(thread, state);
2403 set_irq_level(oldlevel);
2404 #endif
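/* Illustrative sketch only (not part of the original file): creating a thread
 * frozen so it can be released later, once whatever it depends on is ready.
 * "demo_main"/"demo_stack" are the hypothetical names from the earlier sketch.
 *
 *   struct thread_entry *t =
 *       create_thread(demo_main, demo_stack, sizeof(demo_stack),
 *                     CREATE_THREAD_FROZEN, "demo"
 *                     IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));
 *   ...
 *   thread_thaw(t);   // moves the thread from STATE_FROZEN to STATE_RUNNING
 */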
2407 /*---------------------------------------------------------------------------
2408 * Return the ID of the currently executing thread.
2409 *---------------------------------------------------------------------------
2411 struct thread_entry * thread_get_current(void)
2413 return cores[CURRENT_CORE].running;
2416 #if NUM_CORES > 1
2417 /*---------------------------------------------------------------------------
2418 * Switch the processor that the currently executing thread runs on.
2419 *---------------------------------------------------------------------------
2421 unsigned int switch_core(unsigned int new_core)
2423 const unsigned int core = CURRENT_CORE;
2424 struct thread_entry *current = cores[core].running;
2425 struct thread_entry *w;
2426 int oldlevel;
2428 /* Interrupts can access the lists that will be used - disable them */
2429 unsigned state = GET_THREAD_STATE(current);
2431 if (core == new_core)
2433 /* No change - just unlock everything and return same core */
2434 UNLOCK_THREAD(current, state);
2435 return core;
2438 /* Get us off the running list for the current core */
2439 remove_from_list_l(&cores[core].running, current);
2441 /* Stash return value (old core) in a safe place */
2442 current->retval = core;
2444 /* If a timeout hadn't yet been cleaned up, it must be removed now or
2445 * the other core will likely attempt a removal from the wrong list! */
2446 if (current->tmo.prev != NULL)
2448 remove_from_list_tmo(current);
2451 /* Change the core number for this thread slot */
2452 current->core = new_core;
2454 /* Do not use core_schedule_wakeup here since this will result in
2455 * the thread starting to run on the other core before being finished on
2456 * this one. Delay the wakeup list unlock to keep the other core stuck
2457 * until this thread is ready. */
2458 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2459 w = LOCK_LIST(&cores[new_core].waking);
2460 ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);
2462 /* Make a callback into device-specific code, unlock the wakeup list so
2463 * that execution may resume on the new core, unlock our slot and finally
2464 * restore the interrupt level */
2465 cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
2466 TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL;
2467 cores[core].blk_ops.irq_level = oldlevel;
2468 cores[core].blk_ops.list_p = &cores[new_core].waking;
2469 #if CONFIG_CORELOCK == CORELOCK_SWAP
2470 cores[core].blk_ops.state = STATE_RUNNING;
2471 cores[core].blk_ops.list_v = w;
2472 #endif
2474 #ifdef HAVE_PRIORITY_SCHEDULING
2475 current->priority_x = HIGHEST_PRIORITY;
2476 cores[core].highest_priority = LOWEST_PRIORITY;
2477 #endif
2478 /* Do the stack switching, cache maintenance and switch_thread call -
2479 requires native code */
2480 switch_thread_core(core, current);
2482 #ifdef HAVE_PRIORITY_SCHEDULING
2483 current->priority_x = LOWEST_PRIORITY;
2484 cores[current->core].highest_priority = LOWEST_PRIORITY;
2485 #endif
2487 /* Finally return the old core to caller */
2488 return current->retval;
2489 (void)state;
2491 #endif /* NUM_CORES > 1 */
2493 /*---------------------------------------------------------------------------
2494 * Initialize threading API. This assumes interrupts are not yet enabled. On
2495 * multicore setups, no core is allowed to proceed until create_thread calls
2496 * are safe to perform.
2497 *---------------------------------------------------------------------------
2499 void init_threads(void)
2501 const unsigned int core = CURRENT_CORE;
2502 struct thread_entry *thread;
2503 int slot;
2505 /* CPU will initialize first and then sleep */
2506 slot = find_empty_thread_slot();
2508 if (slot >= MAXTHREADS)
2510 /* WTF? There really must be a slot available at this stage.
2511 * This can fail if, for example, .bss isn't zeroed out by the loader
2512 * or the threads array is in the wrong section. */
2513 THREAD_PANICF("init_threads->no slot", NULL);
2516 /* Initialize initially non-zero members of core */
2517 thread_queue_init(&cores[core].waking);
2518 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2519 #if NUM_CORES == 1
2520 cores[core].irq_level = STAY_IRQ_LEVEL;
2521 #endif
2522 #ifdef HAVE_PRIORITY_SCHEDULING
2523 cores[core].highest_priority = LOWEST_PRIORITY;
2524 #endif
2526 /* Initialize initially non-zero members of slot */
2527 thread = &threads[slot];
2528 thread->name = main_thread_name;
2529 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); /* No sync worries yet */
2530 #if NUM_CORES > 1
2531 thread->core = core;
2532 #endif
2533 #ifdef HAVE_PRIORITY_SCHEDULING
2534 thread->priority = PRIORITY_USER_INTERFACE;
2535 thread->priority_x = LOWEST_PRIORITY;
2536 #endif
2537 #if CONFIG_CORELOCK == SW_CORELOCK
2538 corelock_init(&thread->cl);
2539 #endif
2541 add_to_list_l(&cores[core].running, thread);
2543 if (core == CPU)
2545 thread->stack = stackbegin;
2546 thread->stack_size = (int)stackend - (int)stackbegin;
2547 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
2548 /* TODO: HAL interface for this */
2549 /* Wake up coprocessor and let it initialize kernel and threads */
2550 MBX_MSG_CLR = 0x3f;
2551 COP_CTL = PROC_WAKE;
2552 /* Sleep until finished */
2553 CPU_CTL = PROC_SLEEP;
2555 else
2557 /* Initial stack is the COP idle stack */
2558 thread->stack = cop_idlestackbegin;
2559 thread->stack_size = IDLE_STACK_SIZE;
2560 /* Get COP safely primed inside switch_thread where it will remain
2561 * until a thread actually exists on it */
2562 CPU_CTL = PROC_WAKE;
2563 remove_thread(NULL);
2564 #endif /* NUM_CORES */
2568 /*---------------------------------------------------------------------------
2569 * Returns the maximum percentage of stack a thread ever used while running.
2570 * NOTE: Some large buffer allocations that don't use enough of the buffer
2571 * to overwrite stackptr[0] will not be seen.
2572 *---------------------------------------------------------------------------
2574 int thread_stack_usage(const struct thread_entry *thread)
2576 unsigned int *stackptr = thread->stack;
2577 int stack_words = thread->stack_size / sizeof (int);
2578 int i, usage = 0;
2580 for (i = 0; i < stack_words; i++)
2582 if (stackptr[i] != DEADBEEF)
2584 usage = ((stack_words - i) * 100) / stack_words;
2585 break;
2589 return usage;
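/* Illustrative sketch only (not part of the original file): a debug screen
 * could report the current thread's peak stack usage like this.
 *
 *   int pct = thread_stack_usage(thread_get_current());  // 0..100 percent
 */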
2592 #if NUM_CORES > 1
2593 /*---------------------------------------------------------------------------
2594 * Returns the maximum percentage of the core's idle stack ever used during
2595 * runtime.
2596 *---------------------------------------------------------------------------
2598 int idle_stack_usage(unsigned int core)
2600 unsigned int *stackptr = idle_stacks[core];
2601 int i, usage = 0;
2603 for (i = 0; i < IDLE_STACK_WORDS; i++)
2605 if (stackptr[i] != DEADBEEF)
2607 usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
2608 break;
2612 return usage;
2614 #endif
2616 /*---------------------------------------------------------------------------
2617 * Returns the current thread status. This is a snapshot for debugging and
2618 * does not do any slot synchronization so it could return STATE_BUSY.
2619 *---------------------------------------------------------------------------
2621 unsigned thread_get_status(const struct thread_entry *thread)
2623 return thread->state;
2626 /*---------------------------------------------------------------------------
2627 * Fills in the buffer with the specified thread's name. If the name is NULL,
2628 * empty, or the thread is in destruct state, a formatted ID is written
2629 * instead.
2630 *---------------------------------------------------------------------------
2632 void thread_get_name(char *buffer, int size,
2633 struct thread_entry *thread)
2635 if (size <= 0)
2636 return;
2638 *buffer = '\0';
2640 if (thread)
2642 /* Display thread name if one or ID if none */
2643 const char *name = thread->name;
2644 const char *fmt = "%s";
2645 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
2647 name = (const char *)thread;
2648 fmt = "%08lX";
2650 snprintf(buffer, size, fmt, name);