firmware/thread.c
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Ulf Ralberg
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
19 #include "config.h"
20 #include <stdbool.h>
21 #include "thread.h"
22 #include "panic.h"
23 #include "sprintf.h"
24 #include "system.h"
25 #include "kernel.h"
26 #include "cpu.h"
27 #include "string.h"
28 #ifdef RB_PROFILE
29 #include <profile.h>
30 #endif
32 /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
33 #ifdef DEBUG
34 #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
35 #else
36 #define THREAD_EXTRA_CHECKS 0
37 #endif
39 /**
40 * General locking order to guarantee progress. Order must be observed but
41 * all stages are not necessarily obligatory. Going from 1) to 3) is
42 * perfectly legal.
44 * 1) IRQ
45 * This is first because of the likelihood of having an interrupt occur that
46 * also accesses one of the objects farther down the list. Any non-blocking
47 * synchronization done may already have a lock on something during normal
48 * execution and if an interrupt handler running on the same processor as
49 * the one that has the resource locked were to attempt to access the
50 * resource, the interrupt handler would wait forever for an unlock
51 * that will never happen. There is no danger if the interrupt occurs on
52 * a different processor because the one that has the lock will eventually
53 * unlock and the other processor's handler may proceed at that time. Not
54 * necessary when the resource in question is definitely not available to
55 * interrupt handlers.
57 * 2) Kernel Object
58 * 1) May be needed beforehand if the kernel object allows dual-use such as
59 * event queues. The kernel object must have a scheme to protect itself from
60 * access by another processor and is responsible for serializing the calls
61 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
62 * other. If a thread blocks on an object it must fill-in the blk_ops members
63 * for its core to unlock _after_ the thread's context has been saved and the
64 * unlocking will be done in reverse from this hierarchy.
66 * 3) Thread Slot
67 * This locks access to the thread's slot such that its state cannot be
68 * altered by another processor when a state change is in progress such as
69 * when it is in the process of going on a blocked list. An attempt to wake
70 * a thread while it is still blocking will likely desync its state with
71 * the other resources used for that state.
73 * 4) Lists
74 * Usually referring to a list (aka. queue) that a thread will be blocking
75 * on that belongs to some object and is shareable amongst multiple
76 * processors. Parts of the scheduler may have access to them without actually
77 * locking the kernel object such as when a thread is blocked with a timeout
78 * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
79 * its lists locked when the thread blocks so that all object list access is
80 * synchronized. Failure to do so would corrupt the list links.
82 * 5) Core Lists
83 * These lists are specific to a particular processor core and are accessible
84 * by all processor cores and interrupt handlers. They are used when an
85 * operation may only be performed by the thread's own core in a normal
86 * execution context. The wakeup list is the prime example where a thread
87 * may be added by any means and the thread's own core will remove it from
88 * the wakeup list and put it on the running list (which is only ever
89 * accessible by its own processor).
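/* Illustrative sketch (added for clarity; not part of the original file):
 * roughly how a blocking path passes through stages 1)-4) above. The macros
 * and helpers referenced here are defined further down in this file; the
 * body itself is a simplified assumption, not an actual call path (the real
 * ones live in the kernel objects and in the block_thread routines below). */
#if 0
static void example_block_on_queue(struct thread_queue *tq,
                                   struct thread_entry *self)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);  /* 1) IRQ              */
    struct thread_entry *head = LOCK_LIST(tq);        /* 2)/4) object's list */
    unsigned state = GET_THREAD_STATE(self);          /* 3) thread slot      */
    ADD_TO_LIST_L_SELECT(head, tq, self);             /* 4) list update      */
    /* ... context would be saved here, then everything released in the
     * reverse order of acquisition ... */
    UNLOCK_THREAD(self, state);
    UNLOCK_LIST(tq, head);
    set_irq_level(oldlevel);
}
#endif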
91 #define DEADBEEF ((unsigned int)0xdeadbeef)
92 /* Cast to the machine int type, whose size could be < 4. */
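/* Note (added): DEADBEEF serves as a stack canary. Thread stacks are
 * presumably pre-filled with this pattern when they are created (the
 * creation code is outside this excerpt), and switch_thread() below calls
 * thread_stkov() if the first stack word no longer holds it. */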
93 struct core_entry cores[NUM_CORES] IBSS_ATTR;
94 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
95 #ifdef HAVE_SCHEDULER_BOOSTCTRL
96 static int boosted_threads IBSS_ATTR;
97 #endif
99 static const char main_thread_name[] = "main";
100 extern int stackbegin[];
101 extern int stackend[];
103 /* core_sleep procedure to implement for any CPU to ensure an asynchronous wakeup
104 * never results in requiring a wait until the next tick (up to 10000uS!). Likely
105 * requires assembly and careful instruction ordering. Multicore requires
106 * carefully timed sections in order to have synchronization without locking of
107 * any sort.
109 * 1) Disable all interrupts (FIQ and IRQ for ARM for instance)
110 * 2) Check *waking == NULL.
111 * 3) *waking not NULL? Goto step 7.
112 * 4) On multicore, stay awake if directed to do so by another core. If so, goto step 7.
113 * 5) If processor requires, atomically reenable interrupts and perform step 6.
114 * 6) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
115 * goto step 8.
116 * 7) Reenable interrupts.
117 * 8) Exit procedure.
119 static inline void core_sleep(
120 IF_COP(unsigned int core,) struct thread_entry **waking)
121 __attribute__((always_inline));
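/* Illustrative sketch (added; not from the original file): the algorithm
 * described above written as plain C. Real targets implement it in carefully
 * ordered assembly in the processor-specific section below;
 * disable_all_interrupts(), enable_all_interrupts() and cpu_core_halt() are
 * hypothetical stand-ins, not actual APIs. */
#if 0
static inline void core_sleep_sketch(struct thread_entry **waking)
{
    disable_all_interrupts();   /* step 1: mask IRQ (and FIQ on ARM)        */
    if (*waking == NULL)        /* steps 2-3: nothing woke us already?      */
    {
        /* step 4 (multicore stay-awake message check) omitted here */
        cpu_core_halt();        /* steps 5-6: sleep until an interrupt      */
    }
    enable_all_interrupts();    /* step 7 */
}                               /* step 8: exit */
#endif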
123 static void check_tmo_threads(void)
124 __attribute__((noinline));
126 static inline void block_thread_on_l(
127 struct thread_queue *list, struct thread_entry *thread, unsigned state)
128 __attribute__((always_inline));
130 static inline void block_thread_on_l_no_listlock(
131 struct thread_entry **list, struct thread_entry *thread, unsigned state)
132 __attribute__((always_inline));
134 static inline void _block_thread_on_l(
135 struct thread_queue *list, struct thread_entry *thread,
136 unsigned state IF_SWCL(, const bool single))
137 __attribute__((always_inline));
139 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
140 struct thread_queue *list IF_SWCL(, const bool nolock))
141 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
143 IF_SWCL(static inline) void _block_thread(
144 struct thread_queue *list IF_SWCL(, const bool nolock))
145 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
147 static void add_to_list_tmo(struct thread_entry *thread)
148 __attribute__((noinline));
150 static void core_schedule_wakeup(struct thread_entry *thread)
151 __attribute__((noinline));
153 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
154 __attribute__((always_inline));
156 static inline void run_blocking_ops(
157 IF_COP_VOID(unsigned int core, struct thread_entry *thread))
158 __attribute__((always_inline));
160 static void thread_stkov(struct thread_entry *thread)
161 __attribute__((noinline));
163 static inline void store_context(void* addr)
164 __attribute__((always_inline));
166 static inline void load_context(const void* addr)
167 __attribute__((always_inline));
169 void switch_thread(struct thread_entry *old)
170 __attribute__((noinline));
173 /****************************************************************************
174 * Processor-specific section
177 #if defined(CPU_ARM)
178 /*---------------------------------------------------------------------------
179 * Start the thread running and terminate it if it returns
180 *---------------------------------------------------------------------------
182 static void start_thread(void) __attribute__((naked,used));
183 static void start_thread(void)
185 /* r0 = context */
186 asm volatile (
187 "ldr sp, [r0, #32] \n" /* Load initial sp */
188 "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
189 "mov r1, #0 \n" /* Mark thread as running */
190 "str r1, [r0, #40] \n"
191 #if NUM_CORES > 1
192 "ldr r0, =invalidate_icache \n" /* Invalidate this core's cache. */
193 "mov lr, pc \n" /* This could be the first entry into */
194 "bx r0 \n" /* plugin or codec code for this core. */
195 #endif
196 "mov lr, pc \n" /* Call thread function */
197 "bx r4 \n"
198 "mov r0, #0 \n" /* remove_thread(NULL) */
199 "ldr pc, =remove_thread \n"
200 ".ltorg \n" /* Dump constant pool */
201 ); /* No clobber list - new thread doesn't care */
204 /* For startup, place context pointer in r4 slot, start_thread pointer in r5
205 * slot, and thread function pointer in context.start. See load_context for
206 * what happens when thread is initially going to run. */
207 #define THREAD_STARTUP_INIT(core, thread, function) \
208 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
209 (thread)->context.r[1] = (unsigned int)start_thread, \
210 (thread)->context.start = (void *)function; })
212 /*---------------------------------------------------------------------------
213 * Store non-volatile context.
214 *---------------------------------------------------------------------------
216 static inline void store_context(void* addr)
218 asm volatile(
219 "stmia %0, { r4-r11, sp, lr } \n"
220 : : "r" (addr)
224 /*---------------------------------------------------------------------------
225 * Load non-volatile context.
226 *---------------------------------------------------------------------------
228 static inline void load_context(const void* addr)
230 asm volatile(
231 "ldr r0, [%0, #40] \n" /* Load start pointer */
232 "cmp r0, #0 \n" /* Check for NULL */
233 "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
234 "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
235 : : "r" (addr) : "r0" /* only! */
239 #if defined (CPU_PP)
241 #if NUM_CORES > 1
242 extern int cpu_idlestackbegin[];
243 extern int cpu_idlestackend[];
244 extern int cop_idlestackbegin[];
245 extern int cop_idlestackend[];
246 static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
248 [CPU] = cpu_idlestackbegin,
249 [COP] = cop_idlestackbegin
251 #endif /* NUM_CORES */
253 #if CONFIG_CORELOCK == SW_CORELOCK
254 /* Software core locks using Peterson's mutual exclusion algorithm */
256 /*---------------------------------------------------------------------------
257 * Initialize the corelock structure.
258 *---------------------------------------------------------------------------
260 void corelock_init(struct corelock *cl)
262 memset(cl, 0, sizeof (*cl));
265 #if 1 /* Assembly locks to minimize overhead */
266 /*---------------------------------------------------------------------------
267 * Wait for the corelock to become free and acquire it when it does.
268 *---------------------------------------------------------------------------
270 void corelock_lock(struct corelock *cl) __attribute__((naked));
271 void corelock_lock(struct corelock *cl)
273 asm volatile (
274 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
275 "ldrb r1, [r1] \n"
276 "mov r3, #1 \n" /* cl->myl[core] = 1 */
277 "strb r3, [r0, r1, lsr #7] \n"
278 "and r2, r1, #1 \n" /* r2 = othercore */
279 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
280 "1: \n"
281 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 1 ? */
282 "cmp r3, #1 \n"
283 "ldreqb r3, [r0, #2] \n" /* && cl->turn == othercore ? */
284 "cmpeq r3, r2 \n"
285 "bxne lr \n" /* no? lock acquired */
286 "b 1b \n" /* keep trying */
287 : : "i"(&PROCESSOR_ID)
289 (void)cl;
292 /*---------------------------------------------------------------------------
293 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
294 *---------------------------------------------------------------------------
296 int corelock_try_lock(struct corelock *cl) __attribute__((naked));
297 int corelock_try_lock(struct corelock *cl)
299 asm volatile (
300 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
301 "ldrb r1, [r1] \n"
302 "mov r3, #1 \n" /* cl->myl[core] = 1 */
303 "strb r3, [r0, r1, lsr #7] \n"
304 "and r2, r1, #1 \n" /* r2 = othercore */
305 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
306 "1: \n"
307 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 1 ? */
308 "cmp r3, #1 \n"
309 "ldreqb r3, [r0, #2] \n" /* && cl->turn == othercore? */
310 "cmpeq r3, r2 \n"
311 "movne r0, #1 \n" /* no? lock acquired */
312 "bxne lr \n"
313 "mov r2, #0 \n" /* cl->myl[core] = 0 */
314 "strb r2, [r0, r1, lsr #7] \n"
315 "mov r0, r2 \n"
316 "bx lr \n" /* acquisition failed */
317 : : "i"(&PROCESSOR_ID)
320 return 0;
321 (void)cl;
324 /*---------------------------------------------------------------------------
325 * Release ownership of the corelock
326 *---------------------------------------------------------------------------
328 void corelock_unlock(struct corelock *cl) __attribute__((naked));
329 void corelock_unlock(struct corelock *cl)
331 asm volatile (
332 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
333 "ldrb r1, [r1] \n"
334 "mov r2, #0 \n" /* cl->myl[core] = 0 */
335 "strb r2, [r0, r1, lsr #7] \n"
336 "bx lr \n"
337 : : "i"(&PROCESSOR_ID)
339 (void)cl;
341 #else /* C versions for reference */
342 /*---------------------------------------------------------------------------
343 * Wait for the corelock to become free and acquire it when it does.
344 *---------------------------------------------------------------------------
346 void corelock_lock(struct corelock *cl)
348 const unsigned int core = CURRENT_CORE;
349 const unsigned int othercore = 1 - core;
351 cl->myl[core] = 1;
352 cl->turn = othercore;
354 while (cl->myl[othercore] == 1 && cl->turn == othercore);
357 /*---------------------------------------------------------------------------
358 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
359 *---------------------------------------------------------------------------
361 int corelock_try_lock(struct corelock *cl)
363 const unsigned int core = CURRENT_CORE;
364 const unsigned int othercore = 1 - core;
366 cl->myl[core] = 1;
367 cl->turn = othercore;
369 if (cl->myl[othercore] == 1 && cl->turn == othercore)
371 cl->myl[core] = 0;
372 return 0;
375 return 1;
378 /*---------------------------------------------------------------------------
379 * Release ownership of the corelock
380 *---------------------------------------------------------------------------
382 void corelock_unlock(struct corelock *cl)
384 cl->myl[CURRENT_CORE] = 0;
386 #endif /* ASM / C selection */
388 #endif /* CONFIG_CORELOCK == SW_CORELOCK */
390 #ifdef CPU_PP502x
391 /* Some code relies on timing */
392 void switch_thread(struct thread_entry *old) ICODE_ATTR;
393 void core_wake(IF_COP_VOID(unsigned int othercore)) ICODE_ATTR;
394 void core_idle(void) ICODE_ATTR;
395 #endif
397 /*---------------------------------------------------------------------------
398 * Put core in a power-saving state if waking list wasn't repopulated and if
399 * no other core requested a wakeup for it to perform a task.
400 *---------------------------------------------------------------------------
402 static inline void core_sleep(IF_COP(unsigned int core,) struct thread_entry **waking)
404 #if NUM_CORES > 1
405 #ifdef CPU_PP502x
406 /* Disabling IRQ and FIQ is important for making the fixed-time sequence
407 * non-interruptible */
408 asm volatile (
409 "mrs r2, cpsr \n" /* Disable IRQ, FIQ */
410 "orr r2, r2, #0xc0 \n"
411 "msr cpsr_c, r2 \n"
412 "ldr r0, [%[w]] \n" /* Check *waking */
413 "cmp r0, #0 \n" /* != NULL -> exit */
414 "bne 1f \n"
415 /* ------ fixed-time sequence ----- */ /* Can this be relied upon? */
416 "ldr r0, [%[ms], %[oc], lsl #2] \n" /* Stay-awake requested? */
417 "mov r1, #0x80000000 \n"
418 "tst r0, #1 \n"
419 "streq r1, [%[ct], %[c], lsl #2] \n" /* Sleep if not */
420 "nop \n"
421 "mov r0, #0 \n"
422 "str r0, [%[ct], %[c], lsl #2] \n" /* Clear control reg */
423 /* -------------------------------- */
424 "1: \n"
425 "mov r0, #1 \n"
426 "add r1, %[ms], #8 \n"
427 "str r0, [r1, %[oc], lsl #2] \n" /* Clear mailbox */
428 "bic r2, r2, #0xc0 \n" /* Enable interrupts */
429 "msr cpsr_c, r2 \n"
431 : [ct]"r"(&PROC_CTL(CPU)), [ms]"r"(&PROC_MESSAGE(CPU)),
432 [c]"r" (core), [oc]"r"(1-core), [w]"r"(waking)
433 : "r0", "r1", "r2");
434 #else
435 /* TODO: PP5002 */
436 #endif /* CONFIG_CPU == */
437 #else
438 set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
439 if (*waking == NULL)
441 PROC_CTL(IF_COP_CORE(core)) = PROC_SLEEP;
443 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
444 #endif /* NUM_CORES */
447 /*---------------------------------------------------------------------------
448 * Wake another processor core that is sleeping or prevent it from doing so
449 * if it was already destined. FIQ, IRQ should be disabled before calling.
450 *---------------------------------------------------------------------------
452 void core_wake(IF_COP_VOID(unsigned int othercore))
454 #if NUM_CORES == 1
455 /* No wakey - core already wakey */
456 #elif defined (CPU_PP502x)
457 /* avoid r0 since that contains othercore */
458 asm volatile (
459 "mrs r2, cpsr \n"
460 "orr r1, r2, #0xc0 \n"
461 "msr cpsr_c, r1 \n"
462 "mov r1, #1 \n"
463 /* ------ fixed-time sequence ----- */ /* Can this be relied upon? */
464 "str r1, [%[ms], %[oc], lsl #2] \n" /* Send stay-awake message */
465 "nop \n"
466 "nop \n"
467 "ldr r1, [%[ct], %[oc], lsl #2] \n" /* Wake other core if asleep */
468 "tst r1, #0x80000000 \n"
469 "bic r1, r1, #0x80000000 \n"
470 "strne r1, [%[ct], %[oc], lsl #2] \n"
471 /* -------------------------------- */
472 "msr cpsr_c, r2 \n"
474 : [ct]"r"(&PROC_CTL(CPU)), [ms]"r"(&PROC_MESSAGE(CPU)),
475 [oc]"r" (othercore)
476 : "r1", "r2");
477 #else
478 PROC_CTL(othercore) = PROC_WAKE;
479 #endif
482 #if NUM_CORES > 1
483 /*---------------------------------------------------------------------------
484 * Switches to a stack that always resides in the Rockbox core.
486 * Needed when a thread suicides on a core other than the main CPU since the
487 * stack used when idling is the stack of the last thread to run. This stack
488 * may not reside in the core in which case the core will continue to use a
489 * stack from an unloaded module until another thread runs on it.
490 *---------------------------------------------------------------------------
492 static inline void switch_to_idle_stack(const unsigned int core)
494 asm volatile (
495 "str sp, [%0] \n" /* save original stack pointer on idle stack */
496 "mov sp, %0 \n" /* switch stacks */
497 : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
498 (void)core;
501 /*---------------------------------------------------------------------------
502 * Perform core switch steps that need to take place inside switch_thread.
504 * These steps must take place before changing the processor and after
505 * having entered switch_thread since switch_thread may not do a normal return
506 * because the stack being used for anything the compiler saved will not belong
507 * to the thread's destination core and it may have been recycled for other
508 * purposes by the time a normal context load has taken place. switch_thread
509 * will also clobber anything stashed in the thread's context or stored in the
510 * nonvolatile registers if it is saved there before the call since the
511 * compiler's order of operations cannot be known for certain.
513 static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
515 /* Flush our data to ram */
516 flush_icache();
517 /* Stash thread in r4 slot */
518 thread->context.r[0] = (unsigned int)thread;
519 /* Stash restart address in r5 slot */
520 thread->context.r[1] = (unsigned int)thread->context.start;
521 /* Save sp in context.sp while still running on old core */
522 thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
525 /*---------------------------------------------------------------------------
526 * Machine-specific helper function for switching the processor a thread is
527 * running on. Basically, the thread suicides on the departing core and is
528 * reborn on the destination. Were it not for gcc's ill-behavior regarding
529 * naked functions written in C where it actually clobbers non-volatile
530 * registers before the intended prologue code, this would all be much
531 * simpler. Generic setup is done in switch_core itself.
534 /*---------------------------------------------------------------------------
535 * This actually performs the core switch.
537 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
538 __attribute__((naked));
539 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
541 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
542 * Stack access also isn't permitted until restoring the original stack and
543 * context. */
544 asm volatile (
545 "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */
546 "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
547 "ldr r2, [r2, r0, lsl #2] \n"
548 "add r2, r2, %0*4 \n"
549 "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
550 "mov sp, r2 \n" /* switch stacks */
551 "adr r2, 1f \n" /* r2 = new core restart address */
552 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
553 "mov r0, r1 \n" /* switch_thread(thread) */
554 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
555 "1: \n"
556 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
557 "mov r1, #0 \n" /* Clear start address */
558 "str r1, [r0, #40] \n"
559 "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */
560 "mov lr, pc \n"
561 "bx r0 \n"
562 "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */
563 ".ltorg \n" /* Dump constant pool */
564 : : "i"(IDLE_STACK_WORDS)
566 (void)core; (void)thread;
568 #endif /* NUM_CORES */
570 #elif CONFIG_CPU == S3C2440
572 /*---------------------------------------------------------------------------
573 * Put core in a power-saving state if waking list wasn't repopulated.
574 *---------------------------------------------------------------------------
576 static inline void core_sleep(struct thread_entry **waking)
578 /* FIQ also changes the CLKCON register so FIQ must be disabled
579 when changing it here */
580 asm volatile (
581 "mrs r0, cpsr \n" /* Disable IRQ, FIQ */
582 "orr r0, r0, #0xc0 \n"
583 "msr cpsr_c, r0 \n"
584 "ldr r1, [%0] \n" /* Check *waking */
585 "cmp r1, #0 \n"
586 "bne 2f \n" /* != NULL -> exit */
587 "bic r0, r0, #0xc0 \n" /* Prepare IRQ, FIQ enable */
588 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
589 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
590 "orr r2, r2, #4 \n"
591 "str r2, [r1, #0xc] \n"
592 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
593 "mov r3, #0 \n" /* wait for IDLE */
594 "1: \n"
595 "add r3, r3, #1 \n"
596 "cmp r3, #10 \n"
597 "bne 1b \n"
598 "orr r0, r0, #0xc0 \n" /* Disable IRQ, FIQ */
599 "msr cpsr_c, r0 \n"
600 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
601 "bic r2, r2, #4 \n"
602 "str r2, [r1, #0xc] \n"
603 "2: \n"
604 "bic r0, r0, #0xc0 \n" /* Enable IRQ, FIQ */
605 "msr cpsr_c, r0 \n"
606 : : "r"(waking) : "r0", "r1", "r2", "r3");
608 #else
609 static inline void core_sleep(struct thread_entry **waking)
611 (void) waking;
612 #warning core_sleep not implemented, battery life will be decreased
614 #endif /* CONFIG_CPU == */
616 #elif defined(CPU_COLDFIRE)
617 /*---------------------------------------------------------------------------
618 * Start the thread running and terminate it if it returns
619 *---------------------------------------------------------------------------
621 void start_thread(void); /* Provide C access to ASM label */
622 static void __start_thread(void) __attribute__((used));
623 static void __start_thread(void)
625 /* a0=macsr, a1=context */
626 asm volatile (
627 "start_thread: \n" /* Start here - no naked attribute */
628 "move.l %a0, %macsr \n" /* Set initial mac status reg */
629 "lea.l 48(%a1), %a1 \n"
630 "move.l (%a1)+, %sp \n" /* Set initial stack */
631 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
632 "clr.l (%a1) \n" /* Mark thread running */
633 "jsr (%a2) \n" /* Call thread function */
634 "clr.l -(%sp) \n" /* remove_thread(NULL) */
635 "jsr remove_thread \n"
639 /* Set EMAC unit to fractional mode with saturation for each new thread,
640 * since that's what'll be the most useful for most things which the dsp
641 * will do. Codecs should still initialize their preferred modes
642 * explicitly. Context pointer is placed in d2 slot and start_thread
643 * pointer in d3 slot. Thread function pointer is placed in context.start.
644 * See load_context for what happens when thread is initially going to
645 * run.
647 #define THREAD_STARTUP_INIT(core, thread, function) \
648 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
649 (thread)->context.d[0] = (unsigned int)&(thread)->context, \
650 (thread)->context.d[1] = (unsigned int)start_thread, \
651 (thread)->context.start = (void *)(function); })
653 /*---------------------------------------------------------------------------
654 * Store non-volatile context.
655 *---------------------------------------------------------------------------
657 static inline void store_context(void* addr)
659 asm volatile (
660 "move.l %%macsr,%%d0 \n"
661 "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
662 : : "a" (addr) : "d0" /* only! */
666 /*---------------------------------------------------------------------------
667 * Load non-volatile context.
668 *---------------------------------------------------------------------------
670 static inline void load_context(const void* addr)
672 asm volatile (
673 "move.l 52(%0), %%d0 \n" /* Get start address */
674 "beq.b 1f \n" /* NULL -> already running */
675 "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
676 "jmp (%%a2) \n" /* Start the thread */
677 "1: \n"
678 "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
679 "move.l %%d0, %%macsr \n"
680 : : "a" (addr) : "d0" /* only! */
684 /*---------------------------------------------------------------------------
685 * Put core in a power-saving state if waking list wasn't repopulated.
686 *---------------------------------------------------------------------------
688 static inline void core_sleep(struct thread_entry **waking)
690 asm volatile (
691 "moveq.l %1, %%d0 \n" /* Disable interrupts (not audio DMA) */
692 "lsl.l #8, %%d0 \n"
693 "move.w %%d0, %%sr \n"
694 "tst.l (%0) \n" /* Check *waking */
695 "beq.b 1f \n" /* != NULL -> exit */
696 "moveq.l #0x20, %%d0 \n" /* Enable interrupts */
697 "lsl.l #8, %%d0 \n"
698 "move.w %%d0, %%sr \n"
699 ".word 0x51fb \n" /* tpf.l - eat stop instruction */
700 "1: \n"
701 "stop #0x2000 \n" /* Supervisor mode, interrupts enabled
702 upon wakeup */
703 : : "a"(waking), "i"((0x2000 | HIGHEST_IRQ_LEVEL) >> 8) : "d0"
707 #elif CONFIG_CPU == SH7034
708 /*---------------------------------------------------------------------------
709 * Start the thread running and terminate it if it returns
710 *---------------------------------------------------------------------------
712 void start_thread(void); /* Provide C access to ASM label */
713 static void __start_thread(void) __attribute__((used));
714 static void __start_thread(void)
716 /* r8 = context */
717 asm volatile (
718 "_start_thread: \n" /* Start here - no naked attribute */
719 "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
720 "mov.l @(28, r8), r15 \n" /* Set initial sp */
721 "mov #0, r1 \n" /* Start the thread */
722 "jsr @r0 \n"
723 "mov.l r1, @(36, r8) \n" /* Clear start address */
724 "mov.l 1f, r0 \n" /* remove_thread(NULL) */
725 "jmp @r0 \n"
726 "mov #0, r4 \n"
727 "1: \n"
728 ".long _remove_thread \n"
732 /* Place context pointer in r8 slot, function pointer in r9 slot, and
733 * start_thread pointer in context_start */
734 #define THREAD_STARTUP_INIT(core, thread, function) \
735 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
736 (thread)->context.r[1] = (unsigned int)(function), \
737 (thread)->context.start = (void*)start_thread; })
739 /*---------------------------------------------------------------------------
740 * Store non-volatile context.
741 *---------------------------------------------------------------------------
743 static inline void store_context(void* addr)
745 asm volatile (
746 "add #36, %0 \n" /* Start at last reg. By the time routine */
747 "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
748 "mov.l r15,@-%0 \n"
749 "mov.l r14,@-%0 \n"
750 "mov.l r13,@-%0 \n"
751 "mov.l r12,@-%0 \n"
752 "mov.l r11,@-%0 \n"
753 "mov.l r10,@-%0 \n"
754 "mov.l r9, @-%0 \n"
755 "mov.l r8, @-%0 \n"
756 : : "r" (addr)
760 /*---------------------------------------------------------------------------
761 * Load non-volatile context.
762 *---------------------------------------------------------------------------
764 static inline void load_context(const void* addr)
766 asm volatile (
767 "mov.l @(36, %0), r0 \n" /* Get start address */
768 "tst r0, r0 \n"
769 "bt .running \n" /* NULL -> already running */
770 "jmp @r0 \n" /* r8 = context */
771 ".running: \n"
772 "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
773 "mov.l @%0+, r9 \n"
774 "mov.l @%0+, r10 \n"
775 "mov.l @%0+, r11 \n"
776 "mov.l @%0+, r12 \n"
777 "mov.l @%0+, r13 \n"
778 "mov.l @%0+, r14 \n"
779 "mov.l @%0+, r15 \n"
780 "lds.l @%0+, pr \n"
781 : : "r" (addr) : "r0" /* only! */
785 /*---------------------------------------------------------------------------
786 * Put core in a power-saving state if waking list wasn't repopulated.
787 *---------------------------------------------------------------------------
789 static inline void core_sleep(struct thread_entry **waking)
791 asm volatile (
792 "mov %2, r1 \n" /* Disable interrupts */
793 "ldc r1, sr \n"
794 "mov.l @%1, r1 \n" /* Check *waking */
795 "tst r1, r1 \n"
796 "bf 1f \n" /* *waking != NULL ? exit */
797 "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
798 "mov #0, r1 \n" /* Enable interrupts */
799 "ldc r1, sr \n" /* Following instruction cannot be interrupted */
800 "bra 2f \n" /* bra and sleep are executed at once */
801 "sleep \n" /* Execute standby */
802 "1: \n"
803 "mov #0, r1 \n" /* Enable interrupts */
804 "ldc r1, sr \n"
805 "2: \n"
807 : "z"(&SBYCR-GBR), "r"(waking), "i"(HIGHEST_IRQ_LEVEL)
808 : "r1");
811 #endif /* CONFIG_CPU == */
814 * End Processor-specific section
815 ***************************************************************************/
817 #if THREAD_EXTRA_CHECKS
818 static void thread_panicf(const char *msg, struct thread_entry *thread)
820 #if NUM_CORES > 1
821 const unsigned int core = thread->core;
822 #endif
823 static char name[32];
824 thread_get_name(name, 32, thread);
825 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
827 static void thread_stkov(struct thread_entry *thread)
829 thread_panicf("Stkov", thread);
831 #define THREAD_PANICF(msg, thread) \
832 thread_panicf(msg, thread)
833 #define THREAD_ASSERT(exp, msg, thread) \
834 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
835 #else
836 static void thread_stkov(struct thread_entry *thread)
838 #if NUM_CORES > 1
839 const unsigned int core = thread->core;
840 #endif
841 static char name[32];
842 thread_get_name(name, 32, thread);
843 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
845 #define THREAD_PANICF(msg, thread)
846 #define THREAD_ASSERT(exp, msg, thread)
847 #endif /* THREAD_EXTRA_CHECKS */
849 /*---------------------------------------------------------------------------
850 * Lock a list pointer and return its value
851 *---------------------------------------------------------------------------
853 #if CONFIG_CORELOCK == SW_CORELOCK
854 /* Separate locking function versions */
856 /* Thread locking */
857 #define GET_THREAD_STATE(thread) \
858 ({ corelock_lock(&(thread)->cl); (thread)->state; })
859 #define TRY_GET_THREAD_STATE(thread) \
860 ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
861 #define UNLOCK_THREAD(thread, state) \
862 ({ corelock_unlock(&(thread)->cl); })
863 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
864 ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })
866 /* List locking */
867 #define LOCK_LIST(tqp) \
868 ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
869 #define UNLOCK_LIST(tqp, mod) \
870 ({ corelock_unlock(&(tqp)->cl); })
871 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
872 ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })
874 /* Select the queue pointer directly */
875 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
876 ({ add_to_list_l(&(tqp)->queue, (thread)); })
877 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
878 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
880 #elif CONFIG_CORELOCK == CORELOCK_SWAP
881 /* Native swap/exchange versions */
883 /* Thread locking */
884 #define GET_THREAD_STATE(thread) \
885 ({ unsigned _s; \
886 while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
887 _s; })
888 #define TRY_GET_THREAD_STATE(thread) \
889 ({ xchg8(&(thread)->state, STATE_BUSY); })
890 #define UNLOCK_THREAD(thread, _state) \
891 ({ (thread)->state = (_state); })
892 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
893 ({ (thread)->state = (_state); })
895 /* List locking */
896 #define LOCK_LIST(tqp) \
897 ({ struct thread_entry *_l; \
898 while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
899 _l; })
900 #define UNLOCK_LIST(tqp, mod) \
901 ({ (tqp)->queue = (mod); })
902 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
903 ({ (tqp)->queue = (mod); })
905 /* Select the local queue pointer copy returned from LOCK_LIST */
906 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
907 ({ add_to_list_l(&(tc), (thread)); })
908 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
909 ({ remove_from_list_l(&(tc), (thread)); })
911 #else
912 /* Single-core/non-locked versions */
914 /* Threads */
915 #define GET_THREAD_STATE(thread) \
916 ({ (thread)->state; })
917 #define UNLOCK_THREAD(thread, _state)
918 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
919 ({ (thread)->state = (_state); })
921 /* Lists */
922 #define LOCK_LIST(tqp) \
923 ({ (tqp)->queue; })
924 #define UNLOCK_LIST(tqp, mod)
925 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
926 ({ (tqp)->queue = (mod); })
928 /* Select the queue pointer directly */
929 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
930 ({ add_to_list_l(&(tqp)->queue, (thread)); })
931 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
932 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
934 #endif /* locking selection */
936 #if THREAD_EXTRA_CHECKS
937 /*---------------------------------------------------------------------------
938 * Lock the thread slot to obtain the state and then unlock it. Waits for
939 * it not to be busy. Used for debugging.
940 *---------------------------------------------------------------------------
942 static unsigned peek_thread_state(struct thread_entry *thread)
944 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
945 unsigned state = GET_THREAD_STATE(thread);
946 UNLOCK_THREAD(thread, state);
947 set_irq_level(oldlevel);
948 return state;
950 #endif /* THREAD_EXTRA_CHECKS */
952 /*---------------------------------------------------------------------------
953 * Adds a thread to a list of threads using "insert last". Uses the "l"
954 * links.
955 *---------------------------------------------------------------------------
957 static void add_to_list_l(struct thread_entry **list,
958 struct thread_entry *thread)
960 struct thread_entry *l = *list;
962 if (l == NULL)
964 /* Insert into unoccupied list */
965 thread->l.next = thread;
966 thread->l.prev = thread;
967 *list = thread;
968 return;
971 /* Insert last */
972 thread->l.next = l;
973 thread->l.prev = l->l.prev;
974 thread->l.prev->l.next = thread;
975 l->l.prev = thread;
977 /* Insert next
978 thread->l.next = l->l.next;
979 thread->l.prev = l;
980 thread->l.next->l.prev = thread;
981 l->l.next = thread;
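/* Usage sketch (added; a, b and c are hypothetical threads): the "l" links
 * form a circular doubly-linked list and the head pointer keeps pointing at
 * the first entry, so successive inserts end up at the tail. */
#if 0
struct thread_entry *q = NULL;
add_to_list_l(&q, a);   /* q -> a,  a.next == a.prev == a   */
add_to_list_l(&q, b);   /* q -> a,  circular order: a, b    */
add_to_list_l(&q, c);   /* q -> a,  circular order: a, b, c */
#endif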
985 /*---------------------------------------------------------------------------
986 * Locks a list, adds the thread entry and unlocks the list on multicore.
987 * Defined as add_to_list_l on single-core.
988 *---------------------------------------------------------------------------
990 #if NUM_CORES > 1
991 static void add_to_list_l_locked(struct thread_queue *tq,
992 struct thread_entry *thread)
994 struct thread_entry *t = LOCK_LIST(tq);
995 ADD_TO_LIST_L_SELECT(t, tq, thread);
996 UNLOCK_LIST(tq, t);
997 (void)t;
999 #else
1000 #define add_to_list_l_locked(tq, thread) \
1001 add_to_list_l(&(tq)->queue, (thread))
1002 #endif
1004 /*---------------------------------------------------------------------------
1005 * Removes a thread from a list of threads. Uses the "l" links.
1006 *---------------------------------------------------------------------------
1008 static void remove_from_list_l(struct thread_entry **list,
1009 struct thread_entry *thread)
1011 struct thread_entry *prev, *next;
1013 next = thread->l.next;
1015 if (thread == next)
1017 /* The only item */
1018 *list = NULL;
1019 return;
1022 if (thread == *list)
1024 /* List becomes next item */
1025 *list = next;
1028 prev = thread->l.prev;
1030 /* Fix links to jump over the removed entry. */
1031 prev->l.next = next;
1032 next->l.prev = prev;
1035 /*---------------------------------------------------------------------------
1036 * Locks a list, removes the thread entry and unlocks the list on multicore.
1037 * Defined as remove_from_list_l on single-core.
1038 *---------------------------------------------------------------------------
1040 #if NUM_CORES > 1
1041 static void remove_from_list_l_locked(struct thread_queue *tq,
1042 struct thread_entry *thread)
1044 struct thread_entry *t = LOCK_LIST(tq);
1045 REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
1046 UNLOCK_LIST(tq, t);
1047 (void)t;
1049 #else
1050 #define remove_from_list_l_locked(tq, thread) \
1051 remove_from_list_l(&(tq)->queue, (thread))
1052 #endif
1054 /*---------------------------------------------------------------------------
1055 * Add a thread to the core's timeout list by linking the pointers in its
1056 * tmo structure.
1057 *---------------------------------------------------------------------------
1059 static void add_to_list_tmo(struct thread_entry *thread)
1061 /* Insert first */
1062 struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;
1064 thread->tmo.prev = thread;
1065 thread->tmo.next = t;
1067 if (t != NULL)
1069 /* Fix second item's prev pointer to point to this thread */
1070 t->tmo.prev = thread;
1073 cores[IF_COP_CORE(thread->core)].timeout = thread;
1076 /*---------------------------------------------------------------------------
1077 * Remove a thread from the core's timeout list by unlinking the pointers in
1078 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
1079 * is cancelled.
1080 *---------------------------------------------------------------------------
1082 static void remove_from_list_tmo(struct thread_entry *thread)
1084 struct thread_entry *next = thread->tmo.next;
1085 struct thread_entry *prev;
1087 if (thread == cores[IF_COP_CORE(thread->core)].timeout)
1089 /* Next item becomes list head */
1090 cores[IF_COP_CORE(thread->core)].timeout = next;
1092 if (next != NULL)
1094 /* Fix new list head's prev to point to itself. */
1095 next->tmo.prev = next;
1098 thread->tmo.prev = NULL;
1099 return;
1102 prev = thread->tmo.prev;
1104 if (next != NULL)
1106 next->tmo.prev = prev;
1109 prev->tmo.next = next;
1110 thread->tmo.prev = NULL;
1113 /*---------------------------------------------------------------------------
1114 * Schedules a thread wakeup on the specified core. Threads will be made
1115 * ready to run when the next task switch occurs. Note that this does not
1116 * introduce an on-core delay since the soonest the next thread may run is
1117 * no sooner than that. Other cores and on-core interrupts may only ever
1118 * add to the list.
1119 *---------------------------------------------------------------------------
1121 static void core_schedule_wakeup(struct thread_entry *thread)
1123 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1124 const unsigned int core = IF_COP_CORE(thread->core);
1125 add_to_list_l_locked(&cores[core].waking, thread);
1126 #if NUM_CORES > 1
1127 if (core != CURRENT_CORE)
1129 core_wake(core);
1131 #endif
1132 set_irq_level(oldlevel);
1135 /*---------------------------------------------------------------------------
1136 * If the waking list was populated, move all threads on it onto the running
1137 * list so they may be run ASAP.
1138 *---------------------------------------------------------------------------
1140 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
1142 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1143 struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
1144 struct thread_entry *r = cores[IF_COP_CORE(core)].running;
1146 /* Transfer all threads on waking list to running list in one
1147 swoop */
1148 if (r != NULL)
1150 /* Place waking threads at the end of the running list. */
1151 struct thread_entry *tmp;
1152 w->l.prev->l.next = r;
1153 r->l.prev->l.next = w;
1154 tmp = r->l.prev;
1155 r->l.prev = w->l.prev;
1156 w->l.prev = tmp;
1158 else
1160 /* Just transfer the list as-is */
1161 cores[IF_COP_CORE(core)].running = w;
1163 /* Just leave any timeout threads on the timeout list. If a timeout check
1164 * is due, they will be removed there. If they do a timeout again before
1165 * being removed, they will just stay on the list with a new expiration
1166 * tick. */
1168 /* Waking list is clear - NULL and unlock it */
1169 UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
1170 set_irq_level(oldlevel);
1173 /*---------------------------------------------------------------------------
1174 * Check the core's timeout list when at least one thread is due to wake.
1175 * Filtering for the condition is done before making the call. Resets the
1176 * tick at which the next check will occur.
1177 *---------------------------------------------------------------------------
1179 static void check_tmo_threads(void)
1181 const unsigned int core = CURRENT_CORE;
1182 const long tick = current_tick; /* snapshot the current tick */
1183 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1184 struct thread_entry *next = cores[core].timeout;
1186 /* If there are no processes waiting for a timeout, just keep the check
1187 tick from falling into the past. */
1188 if (next != NULL)
1190 /* Check sleeping threads. */
1191 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1195 /* Must make sure no one else is examining the state, wait until
1196 slot is no longer busy */
1197 struct thread_entry *curr = next;
1198 next = curr->tmo.next;
1200 unsigned state = GET_THREAD_STATE(curr);
1202 if (state < TIMEOUT_STATE_FIRST)
1204 /* Cleanup threads no longer on a timeout but still on the
1205 * list. */
1206 remove_from_list_tmo(curr);
1207 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1209 else if (TIME_BEFORE(tick, curr->tmo_tick))
1211 /* Timeout still pending - this will be the usual case */
1212 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1214 /* Earliest timeout found so far - move the next check up
1215 to its time */
1216 next_tmo_check = curr->tmo_tick;
1218 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1220 else
1222 /* Sleep timeout has been reached so bring the thread back to
1223 * life again. */
1224 if (state == STATE_BLOCKED_W_TMO)
1226 remove_from_list_l_locked(curr->bqp, curr);
1229 remove_from_list_tmo(curr);
1230 add_to_list_l(&cores[core].running, curr);
1231 UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
1234 /* Break the loop once we have walked through the list of all
1235 * sleeping processes or have removed them all. */
1237 while (next != NULL);
1239 set_irq_level(oldlevel);
1242 cores[core].next_tmo_check = next_tmo_check;
1245 /*---------------------------------------------------------------------------
1246 * Performs operations that must be done before blocking a thread but after
1247 * the state is saved - follows reverse of locking order. blk_ops.flags is
1248 * assumed to be nonzero.
1249 *---------------------------------------------------------------------------
1251 static inline void run_blocking_ops(
1252 IF_COP_VOID(unsigned int core, struct thread_entry *thread))
1254 #if NUM_CORES > 1
1255 struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
1256 const unsigned flags = ops->flags;
1258 if (flags == 0)
1259 return;
1261 if (flags & TBOP_SWITCH_CORE)
1263 core_switch_blk_op(core, thread);
1266 #if CONFIG_CORELOCK == SW_CORELOCK
1267 if (flags & TBOP_UNLOCK_LIST)
1269 UNLOCK_LIST(ops->list_p, NULL);
1272 if (flags & TBOP_UNLOCK_CORELOCK)
1274 corelock_unlock(ops->cl_p);
1277 if (flags & TBOP_UNLOCK_THREAD)
1279 UNLOCK_THREAD(ops->thread, 0);
1281 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1282 /* Write updated variable value into memory location */
1283 switch (flags & TBOP_VAR_TYPE_MASK)
1285 case TBOP_UNLOCK_LIST:
1286 UNLOCK_LIST(ops->list_p, ops->list_v);
1287 break;
1288 case TBOP_SET_VARi:
1289 *ops->var_ip = ops->var_iv;
1290 break;
1291 case TBOP_SET_VARu8:
1292 *ops->var_u8p = ops->var_u8v;
1293 break;
1295 #endif /* CONFIG_CORELOCK == */
1297 /* Unlock thread's slot */
1298 if (flags & TBOP_UNLOCK_CURRENT)
1300 UNLOCK_THREAD(thread, ops->state);
1303 /* Reset the IRQ level */
1304 if (flags & TBOP_IRQ_LEVEL)
1306 set_irq_level(ops->irq_level);
1309 ops->flags = 0;
1310 #else
1311 int level = cores[CURRENT_CORE].irq_level;
1312 if (level == STAY_IRQ_LEVEL)
1313 return;
1315 cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL;
1316 set_irq_level(level);
1317 #endif /* NUM_CORES */
1321 /*---------------------------------------------------------------------------
1322 * Runs any operations that may cause threads to be ready to run and then
1323 * sleeps the processor core until the next interrupt if none are.
1324 *---------------------------------------------------------------------------
1326 static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
1328 for (;;)
1330 /* We want to do these ASAP as they may change the decision to sleep
1331 * the core, or the core may have woken because an interrupt occurred
1332 * and posted a message to a queue. */
1333 if (cores[IF_COP_CORE(core)].waking.queue != NULL)
1335 core_perform_wakeup(IF_COP(core));
1338 /* If there are threads on a timeout and the earliest wakeup is due,
1339 * check the list and wake any threads that need to start running
1340 * again. */
1341 if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
1343 check_tmo_threads();
1346 /* If there is a ready to run task, return its ID and keep core
1347 * awake. */
1348 if (cores[IF_COP_CORE(core)].running != NULL)
1350 return cores[IF_COP_CORE(core)].running;
1353 /* Enter sleep mode to reduce power usage - woken up on interrupt or
1354 * wakeup request from another core. May abort if the waking list
1355 * became populated (again). See beginning of this file for the
1356 * algorithm to atomically determine this. */
1357 core_sleep(IF_COP(core, ) &cores[IF_COP_CORE(core)].waking.queue);
1361 #ifdef RB_PROFILE
1362 void profile_thread(void)
1364 profstart(cores[CURRENT_CORE].running - threads);
1366 #endif
1368 /*---------------------------------------------------------------------------
1369 * Prepares a thread to block on an object's list and/or for a specified
1370 * duration - expects object and slot to be appropriately locked if needed.
1371 *---------------------------------------------------------------------------
1373 static inline void _block_thread_on_l(struct thread_queue *list,
1374 struct thread_entry *thread,
1375 unsigned state
1376 IF_SWCL(, const bool nolock))
1378 /* If inlined, unreachable branches will be pruned with no size penalty
1379 because constant params are used for state and nolock. */
1380 const unsigned int core = IF_COP_CORE(thread->core);
1382 /* Remove the thread from the list of running threads. */
1383 remove_from_list_l(&cores[core].running, thread);
1385 /* Add a timeout to the block if not infinite */
1386 switch (state)
1388 case STATE_BLOCKED:
1389 /* Put the thread into a new list of inactive threads. */
1390 #if CONFIG_CORELOCK == SW_CORELOCK
1391 if (nolock)
1393 thread->bqp = NULL; /* Indicate nolock list */
1394 thread->bqnlp = (struct thread_entry **)list;
1395 add_to_list_l((struct thread_entry **)list, thread);
1397 else
1398 #endif
1400 thread->bqp = list;
1401 add_to_list_l_locked(list, thread);
1403 break;
1404 case STATE_BLOCKED_W_TMO:
1405 /* Put the thread into a new list of inactive threads. */
1406 #if CONFIG_CORELOCK == SW_CORELOCK
1407 if (nolock)
1409 thread->bqp = NULL; /* Indicate nolock list */
1410 thread->bqnlp = (struct thread_entry **)list;
1411 add_to_list_l((struct thread_entry **)list, thread);
1413 else
1414 #endif
1416 thread->bqp = list;
1417 add_to_list_l_locked(list, thread);
1419 /* Fall-through */
1420 case STATE_SLEEPING:
1421 /* If this thread times out sooner than any other thread, update
1422 next_tmo_check to its timeout */
1423 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1425 cores[core].next_tmo_check = thread->tmo_tick;
1428 if (thread->tmo.prev == NULL)
1430 add_to_list_tmo(thread);
1432 /* else thread was never removed from list - just keep it there */
1433 break;
1436 #ifdef HAVE_PRIORITY_SCHEDULING
1437 /* Reset priorities */
1438 if (thread->priority == cores[core].highest_priority)
1439 cores[core].highest_priority = LOWEST_PRIORITY;
1440 #endif
1442 #if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
1443 /* Safe to set state now */
1444 thread->state = state;
1445 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1446 cores[core].blk_ops.state = state;
1447 #endif
1449 #if NUM_CORES > 1
1450 /* Delay slot unlock until task switch */
1451 cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
1452 #endif
1455 static inline void block_thread_on_l(
1456 struct thread_queue *list, struct thread_entry *thread, unsigned state)
1458 _block_thread_on_l(list, thread, state IF_SWCL(, false));
1461 static inline void block_thread_on_l_no_listlock(
1462 struct thread_entry **list, struct thread_entry *thread, unsigned state)
1464 _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
1467 /*---------------------------------------------------------------------------
1468 * Switch thread in round robin fashion for any given priority. Any thread
1469 * that removed itself from the running list first must specify itself in
1470 * the parameter.
1472 * INTERNAL: Intended for use by kernel and not for programs.
1473 *---------------------------------------------------------------------------
1475 void switch_thread(struct thread_entry *old)
1477 const unsigned int core = CURRENT_CORE;
1478 struct thread_entry *thread = cores[core].running;
1480 if (old == NULL)
1482 /* Move to next thread */
1483 old = thread;
1484 cores[core].running = old->l.next;
1486 /* else running list is already at next thread */
1488 #ifdef RB_PROFILE
1489 profile_thread_stopped(old - threads);
1490 #endif
1492 /* Begin task switching by saving our current context so that we can
1493 * restore the state of the current thread later to the point prior
1494 * to this call. */
1495 store_context(&old->context);
1497 /* Check if the current thread's stack has overflowed */
1498 if(((unsigned int *)old->stack)[0] != DEADBEEF)
1499 thread_stkov(old);
1501 /* Run any blocking operations requested before switching/sleeping */
1502 run_blocking_ops(IF_COP(core, old));
1504 /* Go through the list of sleeping tasks to check if we need to wake up
1505 * any of them due to timeout. Also puts core into sleep state until
1506 * there is at least one running process again. */
1507 thread = sleep_core(IF_COP(core));
1509 #ifdef HAVE_PRIORITY_SCHEDULING
1510 /* Select the new task based on priorities and the last time a process
1511 * got CPU time. */
1512 for (;;)
1514 int priority = MIN(thread->priority, thread->priority_x);
1516 if (priority < cores[core].highest_priority)
1517 cores[core].highest_priority = priority;
1519 if (priority == cores[core].highest_priority ||
1520 (current_tick - thread->last_run > priority * 8))
1522 cores[core].running = thread;
1523 break;
1526 thread = thread->l.next;
1529 /* Reset the value of thread's last running time to the current time. */
1530 thread->last_run = current_tick;
1531 #endif /* HAVE_PRIORITY_SCHEDULING */
1533 /* And finally give control to the next thread. */
1534 load_context(&thread->context);
1536 #ifdef RB_PROFILE
1537 profile_thread_started(thread - threads);
1538 #endif
1541 /*---------------------------------------------------------------------------
1542 * Removes the boost flag from a thread and unboosts the CPU if thread count
1543 * of boosted threads reaches zero. Requires thread slot to be locked first.
1544 *---------------------------------------------------------------------------
1546 static inline void unboost_thread(struct thread_entry *thread)
1548 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1549 if (thread->boosted != 0)
1551 thread->boosted = 0;
1552 if (--boosted_threads == 0)
1554 cpu_boost(false);
1557 #endif
1558 (void)thread;
1561 /*---------------------------------------------------------------------------
1562 * Sleeps a thread for a specified number of ticks and unboosts the thread
1563 * if it is boosted. If ticks is zero, it does not delay but instead switches
1564 * tasks.
1566 * INTERNAL: Intended for use by kernel and not for programs.
1567 *---------------------------------------------------------------------------
1569 void sleep_thread(int ticks)
1571 /* Get the entry for the current running thread. */
1572 struct thread_entry *current = cores[CURRENT_CORE].running;
1574 #if NUM_CORES > 1
1575 /* Lock thread slot */
1576 GET_THREAD_STATE(current);
1577 #endif
1579 /* Remove our boosted status if any */
1580 unboost_thread(current);
1582 /* Set our timeout, change lists, and finally switch threads.
1583 * Unlock during switch on multicore. */
1584 current->tmo_tick = current_tick + ticks + 1;
1585 block_thread_on_l(NULL, current, STATE_SLEEPING);
1586 switch_thread(current);
1588 /* Our status should be STATE_RUNNING */
1589 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1590 "S:R->!*R", current);
1593 /*---------------------------------------------------------------------------
1594 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1595 * Caller with interrupt-accessible lists should disable interrupts first
1596 * and request a TBOP_IRQ_LEVEL blocking operation to reset it.
1598 * INTERNAL: Intended for use by kernel objects and not for programs.
1599 *---------------------------------------------------------------------------
1601 IF_SWCL(static inline) void _block_thread(struct thread_queue *list
1602 IF_SWCL(, const bool nolock))
1604 /* Get the entry for the current running thread. */
1605 struct thread_entry *current = cores[CURRENT_CORE].running;
1607 /* Set the state to blocked and ask the scheduler to switch tasks,
1608 * this takes us off of the run queue until we are explicitly woken */
1610 #if NUM_CORES > 1
1611 /* Lock thread slot */
1612 GET_THREAD_STATE(current);
1613 #endif
1615 #if CONFIG_CORELOCK == SW_CORELOCK
1616 /* One branch optimized away during inlining */
1617 if (nolock)
1619 block_thread_on_l_no_listlock((struct thread_entry **)list,
1620 current, STATE_BLOCKED);
1622 else
1623 #endif
1625 block_thread_on_l(list, current, STATE_BLOCKED);
1628 switch_thread(current);
1630 /* Our status should be STATE_RUNNING */
1631 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1632 "B:R->!*R", current);
1635 #if CONFIG_CORELOCK == SW_CORELOCK
1636 /* Inline lock/nolock version of _block_thread into these functions */
1637 void block_thread(struct thread_queue *tq)
1639 _block_thread(tq, false);
1642 void block_thread_no_listlock(struct thread_entry **list)
1644 _block_thread((struct thread_queue *)list, true);
1646 #endif /* CONFIG_CORELOCK */
1648 /*---------------------------------------------------------------------------
1649 * Block a thread on a blocking queue for a specified time interval or until
1650 * explicitly woken - whichever happens first.
1651 * Caller with interrupt-accessible lists should disable interrupts first
1652 * and request that interrupt level be restored after switching out the
1653 * current thread.
1655 * INTERNAL: Intended for use by kernel objects and not for programs.
1656 *---------------------------------------------------------------------------
1658 void block_thread_w_tmo(struct thread_queue *list, int timeout)
1660 /* Get the entry for the current running thread. */
1661 struct thread_entry *current = cores[CURRENT_CORE].running;
1663 #if NUM_CORES > 1
1664 /* Lock thread slot */
1665 GET_THREAD_STATE(current);
1666 #endif
1668  /* A block with a timeout is a sleep situation: whatever we are waiting
1669  * for _may or may not_ happen, regardless of boost state (user input
1670 * for instance), so this thread no longer needs to boost */
1671 unboost_thread(current);
1673 /* Set the state to blocked with the specified timeout */
1674 current->tmo_tick = current_tick + timeout;
1675 /* Set the list for explicit wakeup */
1676 block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);
1678 /* Now force a task switch and block until we have been woken up
1679 * by another thread or timeout is reached - whichever happens first */
1680 switch_thread(current);
1682 /* Our status should be STATE_RUNNING */
1683 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1684 "T:R->!*R", current);
1687 /*---------------------------------------------------------------------------
1688 * Explicitly wakeup a thread on a blocking queue. Has no effect on threads
1689 * that called sleep().
1690 * Caller with interrupt-accessible lists should disable interrupts first.
1691 * This code should be considered a critical section by the caller.
1693 * INTERNAL: Intended for use by kernel objects and not for programs.
1694 *---------------------------------------------------------------------------
1696 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
1697 struct thread_queue *list IF_SWCL(, const bool nolock))
1699 struct thread_entry *t;
1700 struct thread_entry *thread;
1701 unsigned state;
1703 /* Wake up the last thread first. */
1704 #if CONFIG_CORELOCK == SW_CORELOCK
1705 /* One branch optimized away during inlining */
1706 if (nolock)
1708 t = list->queue;
1710 else
1711 #endif
1713 t = LOCK_LIST(list);
1716 /* Check if there is a blocked thread at all. */
1717 if (t == NULL)
1719 #if CONFIG_CORELOCK == SW_CORELOCK
1720 if (!nolock)
1721 #endif
1723 UNLOCK_LIST(list, NULL);
1725 return NULL;
1728 thread = t;
1730 #if NUM_CORES > 1
1731 #if CONFIG_CORELOCK == SW_CORELOCK
1732 if (nolock)
1734 /* Lock thread only, not list */
1735 state = GET_THREAD_STATE(thread);
1737 else
1738 #endif
1740 /* This locks in reverse order from other routines so a retry in the
1741 correct order may be needed */
1742 state = TRY_GET_THREAD_STATE(thread);
1743 if (state == STATE_BUSY)
1745 /* Unlock list and retry slot, then list */
1746 UNLOCK_LIST(list, t);
1747 state = GET_THREAD_STATE(thread);
1748 t = LOCK_LIST(list);
1749 /* Be sure thread still exists here - it couldn't have re-added
1750 itself if it was woken elsewhere because this function is
1751 serialized within the object that owns the list. */
1752 if (thread != t)
1754 /* Thread disappeared :( */
1755 UNLOCK_LIST(list, t);
1756 UNLOCK_THREAD(thread, state);
1757 return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
1761 #else /* NUM_CORES == 1 */
1762 state = GET_THREAD_STATE(thread);
1763 #endif /* NUM_CORES */
1765 /* Determine thread's current state. */
1766 switch (state)
1768 case STATE_BLOCKED:
1769 case STATE_BLOCKED_W_TMO:
1770 /* Remove thread from object's blocked list - select t or list depending
1771 on locking type at compile time */
1772 REMOVE_FROM_LIST_L_SELECT(t, list, thread);
1773 #if CONFIG_CORELOCK == SW_CORELOCK
1774  /* Statement optimized away during inlining if nolock != false */
1775 if (!nolock)
1776 #endif
1778 UNLOCK_LIST(list, t); /* Unlock list - removal complete */
1781 #ifdef HAVE_PRIORITY_SCHEDULING
1782 /* Give the task a kick to avoid a stall after wakeup.
1783 Not really proper treatment - TODO later. */
1784 thread->last_run = current_tick - 8*LOWEST_PRIORITY;
1785 #endif
1786 core_schedule_wakeup(thread);
1787 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
1788 return thread;
1789 default:
1790 /* Nothing to do. State is not blocked. */
1791 #if THREAD_EXTRA_CHECKS
1792 THREAD_PANICF("wakeup_thread->block invalid", thread);
1793 case STATE_RUNNING:
1794 case STATE_KILLED:
1795 #endif
1796 #if CONFIG_CORELOCK == SW_CORELOCK
1797 /* Statement optimized away during inlining if nolock != false */
1798 if (!nolock)
1799 #endif
1801 UNLOCK_LIST(list, t); /* Unlock the object's list */
1803 UNLOCK_THREAD(thread, state); /* Unlock thread slot */
1804 return NULL;
1808 #if CONFIG_CORELOCK == SW_CORELOCK
1809 /* Inline lock/nolock version of _wakeup_thread into these functions */
1810 struct thread_entry * wakeup_thread(struct thread_queue *tq)
1812 return _wakeup_thread(tq, false);
1815 struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
1817 return _wakeup_thread((struct thread_queue *)list, true);
1819 #endif /* CONFIG_CORELOCK */
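/* Usage sketch (illustrative): the owning kernel object waking one thread
 * from the same hypothetical example_queue used above. Multicore callers
 * should also be prepared for the THREAD_WAKEUP_MISSING return described
 * in the code above.
 *
 *     struct thread_entry *woken = wakeup_thread(&example_queue);
 *     if (woken != NULL)
 *     {
 *         // a blocked thread has been moved onto its core's running list
 *     }
 */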
1821 /*---------------------------------------------------------------------------
1822 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
1823 * will be locked on multicore.
1824 *---------------------------------------------------------------------------
1826 static int find_empty_thread_slot(void)
1828 #if NUM_CORES > 1
1829 /* Any slot could be on an IRQ-accessible list */
1830 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1831 #endif
1832 /* Thread slots are not locked on single core */
1834 int n;
1836 for (n = 0; n < MAXTHREADS; n++)
1838 /* Obtain current slot state - lock it on multicore */
1839 unsigned state = GET_THREAD_STATE(&threads[n]);
1841 if (state == STATE_KILLED
1842 #if NUM_CORES > 1
1843 && threads[n].name != THREAD_DESTRUCT
1844 #endif
1847 /* Slot is empty - leave it locked and caller will unlock */
1848 break;
1851 /* Finished examining slot - no longer busy - unlock on multicore */
1852 UNLOCK_THREAD(&threads[n], state);
1855 #if NUM_CORES > 1
1856     set_irq_level(oldlevel); /* Reenable interrupts - this slot is
1857                                 not accessible to them yet */
1858 #endif
1860 return n;
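/* Usage sketch (illustrative): how a caller such as create_thread() consumes
 * the result; on multicore the slot comes back locked, so the caller unlocks
 * it once the slot has been filled in.
 *
 *     int slot = find_empty_thread_slot();
 *     if (slot < MAXTHREADS)
 *     {
 *         struct thread_entry *t = &threads[slot];
 *         ...                                        // fill in the slot
 *         UNLOCK_THREAD_SET_STATE(t, STATE_RUNNING); // release the slot
 *     }
 */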
1864 /*---------------------------------------------------------------------------
1865 * Place the current core in idle mode - woken up on interrupt or wake
1866 * request from another core.
1867 *---------------------------------------------------------------------------
1869 void core_idle(void)
1871 const unsigned int core = CURRENT_CORE;
1872 core_sleep(IF_COP(core,) &cores[core].waking.queue);
1875 /*---------------------------------------------------------------------------
1876 * Create a thread
1877 * If using a dual core architecture, specify which core to start the thread
1878 * on, and whether to fall back to the other core if it can't be created
1879 * Return ID if context area could be allocated, else NULL.
1880 *---------------------------------------------------------------------------
1882 struct thread_entry*
1883 create_thread(void (*function)(void), void* stack, int stack_size,
1884 unsigned flags, const char *name
1885 IF_PRIO(, int priority)
1886 IF_COP(, unsigned int core))
1888 unsigned int i;
1889 unsigned int stacklen;
1890 unsigned int *stackptr;
1891 int slot;
1892 struct thread_entry *thread;
1893 unsigned state;
1895 slot = find_empty_thread_slot();
1896 if (slot >= MAXTHREADS)
1898 return NULL;
1901 /* Munge the stack to make it easy to spot stack overflows */
1902 stacklen = stack_size / sizeof(int);
1903 stackptr = stack;
1904 for(i = 0;i < stacklen;i++)
1906 stackptr[i] = DEADBEEF;
1909 /* Store interesting information */
1910 thread = &threads[slot];
1911 thread->name = name;
1912 thread->stack = stack;
1913 thread->stack_size = stack_size;
1914 thread->bqp = NULL;
1915 #if CONFIG_CORELOCK == SW_CORELOCK
1916 thread->bqnlp = NULL;
1917 #endif
1918 thread->queue = NULL;
1919 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1920 thread->boosted = 0;
1921 #endif
1922 #ifdef HAVE_PRIORITY_SCHEDULING
1923 thread->priority_x = LOWEST_PRIORITY;
1924 thread->priority = priority;
1925 thread->last_run = current_tick - priority * 8;
1926 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
1927 #endif
1929 #if NUM_CORES > 1
1930 thread->core = core;
1932 /* Writeback stack munging or anything else before starting */
1933 if (core != CURRENT_CORE)
1935 flush_icache();
1937 #endif
1939 /* Thread is not on any timeout list but be a bit paranoid */
1940 thread->tmo.prev = NULL;
1942 state = (flags & CREATE_THREAD_FROZEN) ?
1943 STATE_FROZEN : STATE_RUNNING;
1945 /* Align stack to an even 32 bit boundary */
1946 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
1948 /* Load the thread's context structure with needed startup information */
1949 THREAD_STARTUP_INIT(core, thread, function);
1951 if (state == STATE_RUNNING)
1953 #if NUM_CORES > 1
1954 if (core != CURRENT_CORE)
1956 /* Next task switch on other core moves thread to running list */
1957 core_schedule_wakeup(thread);
1959 else
1960 #endif
1962 /* Place on running list immediately */
1963 add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
1967 /* remove lock and set state */
1968 UNLOCK_THREAD_SET_STATE(thread, state);
1970 return thread;
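/* Usage sketch (illustrative; the example_* names are hypothetical and
 * DEFAULT_STACK_SIZE is assumed to be the usual stack-size macro): creating
 * a thread with a statically allocated stack that starts running at once.
 *
 *     static long example_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *     static void example_func(void)
 *     {
 *         ...   // thread body
 *     }
 *     ...
 *     struct thread_entry *t =
 *         create_thread(example_func, example_stack, sizeof(example_stack),
 *                       0, "example"
 *                       IF_PRIO(, PRIORITY_USER_INTERFACE)
 *                       IF_COP(, CPU));
 *     if (t == NULL)
 *         ...   // no free slot - handle the failure
 */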
1973 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1974 void trigger_cpu_boost(void)
1976     /* No IRQ disable necessary since the current thread cannot be blocked
1977 on an IRQ-accessible list */
1978 struct thread_entry *current = cores[CURRENT_CORE].running;
1979 unsigned state;
1981 state = GET_THREAD_STATE(current);
1983 if (current->boosted == 0)
1985 current->boosted = 1;
1986 if (++boosted_threads == 1)
1988 cpu_boost(true);
1992 UNLOCK_THREAD(current, state);
1993 (void)state;
1995 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
1997 /*---------------------------------------------------------------------------
1998 * Remove a thread from the scheduler.
1999 * Parameter is the ID as returned from create_thread().
2001 * Use with care on threads that are not under careful control as this may
2002 * leave various objects in an undefined state. When trying to kill a thread
2003  * on another processor, be sure you know what it's doing and that it
2004  * won't be switching cores itself.
2005 *---------------------------------------------------------------------------
2007 void remove_thread(struct thread_entry *thread)
2009 #if NUM_CORES > 1
2010 /* core is not constant here because of core switching */
2011 unsigned int core = CURRENT_CORE;
2012 unsigned int old_core = NUM_CORES;
2013 #else
2014 const unsigned int core = CURRENT_CORE;
2015 #endif
2016 unsigned state;
2017 int oldlevel;
2019 if (thread == NULL)
2020 thread = cores[core].running;
2022 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2023 state = GET_THREAD_STATE(thread);
2025 if (state == STATE_KILLED)
2027 goto thread_killed;
2030 #if NUM_CORES > 1
2031 if (thread->core != core)
2033 /* Switch cores and safely extract the thread there */
2034 /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
2035 condition if the thread runs away to another processor. */
2036 unsigned int new_core = thread->core;
2037 const char *old_name = thread->name;
2039 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2040 UNLOCK_THREAD(thread, state);
2041 set_irq_level(oldlevel);
2043 old_core = switch_core(new_core);
2045 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2046 state = GET_THREAD_STATE(thread);
2048 core = new_core;
2050 if (state == STATE_KILLED)
2052 /* Thread suicided before we could kill it */
2053 goto thread_killed;
2056 /* Reopen slot - it's locked again anyway */
2057 thread->name = old_name;
2059 if (thread->core != core)
2061 /* We won't play thread tag - just forget it */
2062 UNLOCK_THREAD(thread, state);
2063 set_irq_level(oldlevel);
2064 goto thread_kill_abort;
2067 /* Perform the extraction and switch ourselves back to the original
2068 processor */
2070 #endif /* NUM_CORES > 1 */
2072 #ifdef HAVE_PRIORITY_SCHEDULING
2073 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2074 #endif
2075 if (thread->tmo.prev != NULL)
2077 /* Clean thread off the timeout list if a timeout check hasn't
2078 * run yet */
2079 remove_from_list_tmo(thread);
2082 if (thread == cores[core].running)
2084 /* Suicide - thread has unconditional rights to do this */
2085 /* Maintain locks until switch-out */
2086 #if NUM_CORES > 1
2087 cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
2088 cores[core].blk_ops.irq_level = oldlevel;
2089 #else
2090 cores[core].irq_level = oldlevel;
2091 #endif
2092 block_thread_on_l(NULL, thread, STATE_KILLED);
2094 #if NUM_CORES > 1
2095 /* Switch to the idle stack if not on the main core (where "main"
2096 * runs) */
2097 if (core != CPU)
2099 switch_to_idle_stack(core);
2102 flush_icache();
2103 #endif
2104 /* Signal this thread */
2105 thread_queue_wake_no_listlock(&thread->queue);
2106 /* Switch tasks and never return */
2107 switch_thread(thread);
2108 /* This should never and must never be reached - if it is, the
2109 * state is corrupted */
2110 THREAD_PANICF("remove_thread->K:*R", thread);
2113 #if NUM_CORES > 1
2114 if (thread->name == THREAD_DESTRUCT)
2116 /* Another core is doing this operation already */
2117 UNLOCK_THREAD(thread, state);
2118 set_irq_level(oldlevel);
2119 return;
2121 #endif
2122 if (cores[core].waking.queue != NULL)
2124 /* Get any threads off the waking list and onto the running
2125 * list first - waking and running cannot be distinguished by
2126 * state */
2127 core_perform_wakeup(IF_COP(core));
2130 switch (state)
2132 case STATE_RUNNING:
2133 /* Remove thread from ready to run tasks */
2134 remove_from_list_l(&cores[core].running, thread);
2135 break;
2136 case STATE_BLOCKED:
2137 case STATE_BLOCKED_W_TMO:
2138 /* Remove thread from the queue it's blocked on - including its
2139 * own if waiting there */
2140 #if CONFIG_CORELOCK == SW_CORELOCK
2141 /* One or the other will be valid */
2142 if (thread->bqp == NULL)
2144 remove_from_list_l(thread->bqnlp, thread);
2146 else
2147 #endif /* CONFIG_CORELOCK */
2149 remove_from_list_l_locked(thread->bqp, thread);
2151 break;
2152 /* Otherwise thread is killed or is frozen and hasn't run yet */
2155 /* If thread was waiting on itself, it will have been removed above.
2156 * The wrong order would result in waking the thread first and deadlocking
2157 * since the slot is already locked. */
2158 thread_queue_wake_no_listlock(&thread->queue);
2160 thread_killed: /* Thread was already killed */
2161 /* Removal complete - safe to unlock state and reenable interrupts */
2162 UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
2163 set_irq_level(oldlevel);
2165 #if NUM_CORES > 1
2166 thread_kill_abort: /* Something stopped us from killing the thread */
2167 if (old_core < NUM_CORES)
2169 /* Did a removal on another processor's thread - switch back to
2170 native core */
2171 switch_core(old_core);
2173 #endif
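/* Usage sketch (illustrative): passing NULL kills the calling thread and
 * never returns; passing a thread_entry kills that thread, subject to the
 * cautions above.
 *
 *     remove_thread(t);      // t was obtained earlier from create_thread()
 *     ...
 *     remove_thread(NULL);   // self-destruct; control does not come back
 */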
2176 /*---------------------------------------------------------------------------
2177 * Block the current thread until another thread terminates. A thread may
2178 * wait on itself to terminate which prevents it from running again and it
2179 * will need to be killed externally.
2180 * Parameter is the ID as returned from create_thread().
2181 *---------------------------------------------------------------------------
2183 void thread_wait(struct thread_entry *thread)
2185 const unsigned int core = CURRENT_CORE;
2186 struct thread_entry *current = cores[core].running;
2187 unsigned thread_state;
2188 #if NUM_CORES > 1
2189 int oldlevel;
2190 unsigned current_state;
2191 #endif
2193 if (thread == NULL)
2194 thread = current;
2196 #if NUM_CORES > 1
2197 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2198 #endif
2200 thread_state = GET_THREAD_STATE(thread);
2202 #if NUM_CORES > 1
2203 /* We can't lock the same slot twice. The waitee will also lock itself
2204        first, then the thread slots that will be locked and woken in turn.
2205 The same order must be observed here as well. */
2206 if (thread == current)
2208 current_state = thread_state;
2210 else
2212 current_state = GET_THREAD_STATE(current);
2214 #endif
2216 if (thread_state != STATE_KILLED)
2218 #if NUM_CORES > 1
2219 cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
2220 cores[core].blk_ops.irq_level = oldlevel;
2221 #endif
2222 /* Unlock the waitee state at task switch - not done for self-wait
2223        because that would double-unlock the state and potentially
2224 corrupt another's busy assert on the slot */
2225 if (thread != current)
2227 #if CONFIG_CORELOCK == SW_CORELOCK
2228 cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
2229 cores[core].blk_ops.thread = thread;
2230 #elif CONFIG_CORELOCK == CORELOCK_SWAP
2231 cores[core].blk_ops.flags |= TBOP_SET_VARu8;
2232 cores[core].blk_ops.var_u8p = &thread->state;
2233 cores[core].blk_ops.var_u8v = thread_state;
2234 #endif
2236 block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
2237 switch_thread(current);
2238 return;
2241 /* Unlock both slots - obviously the current thread can't have
2242 STATE_KILLED so the above if clause will always catch a thread
2243 waiting on itself */
2244 #if NUM_CORES > 1
2245 UNLOCK_THREAD(current, current_state);
2246 UNLOCK_THREAD(thread, thread_state);
2247 set_irq_level(oldlevel);
2248 #endif
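/* Usage sketch (illustrative): waiting for a worker thread to terminate
 * after asking it to stop by some means of your own (a queue message, a
 * shared flag, ...); `worker` is a hypothetical thread_entry pointer.
 *
 *     // signal the worker to exit, then:
 *     thread_wait(worker);   // blocks until the worker reaches STATE_KILLED
 */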
2251 #ifdef HAVE_PRIORITY_SCHEDULING
2252 /*---------------------------------------------------------------------------
2253 * Sets the thread's relative priority for the core it runs on.
2254 *---------------------------------------------------------------------------
2256 int thread_set_priority(struct thread_entry *thread, int priority)
2258 unsigned old_priority = (unsigned)-1;
2260 if (thread == NULL)
2261 thread = cores[CURRENT_CORE].running;
2263 #if NUM_CORES > 1
2264 /* Thread could be on any list and therefore on an interrupt accessible
2265 one - disable interrupts */
2266 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2267 #endif
2268 unsigned state = GET_THREAD_STATE(thread);
2270 /* Make sure it's not killed */
2271 if (state != STATE_KILLED)
2273 old_priority = thread->priority;
2274 thread->priority = priority;
2275 cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
2278 #if NUM_CORES > 1
2279 UNLOCK_THREAD(thread, state);
2280 set_irq_level(oldlevel);
2281 #endif
2282 return old_priority;
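/* Usage sketch (illustrative): temporarily raising the current thread's
 * priority and restoring it afterwards; PRIORITY_USER_INTERFACE is the same
 * level given to the main thread in init_threads() below.
 *
 *     int old = thread_set_priority(NULL, PRIORITY_USER_INTERFACE);
 *     ...                                  // time-critical work
 *     thread_set_priority(NULL, old);      // restore the previous level
 */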
2285 /*---------------------------------------------------------------------------
2286 * Returns the current priority for a thread.
2287 *---------------------------------------------------------------------------
2289 int thread_get_priority(struct thread_entry *thread)
2291 /* Simple, quick probe. */
2292 if (thread == NULL)
2293 thread = cores[CURRENT_CORE].running;
2295 return (unsigned)thread->priority;
2298 /*---------------------------------------------------------------------------
2299 * Yield that guarantees thread execution once per round regardless of
2300 * thread's scheduler priority - basically a transient realtime boost
2301 * without altering the scheduler's thread precedence.
2303 * HACK ALERT! Search for "priority inheritance" for proper treatment.
2304 *---------------------------------------------------------------------------
2306 void priority_yield(void)
2308 const unsigned int core = CURRENT_CORE;
2309 struct thread_entry *thread = cores[core].running;
2310 thread->priority_x = HIGHEST_PRIORITY;
2311 switch_thread(NULL);
2312 thread->priority_x = LOWEST_PRIORITY;
2313 cores[core].highest_priority = LOWEST_PRIORITY;
2315 #endif /* HAVE_PRIORITY_SCHEDULING */
2317 /* Resumes a frozen thread - similar logic to wakeup_thread except that
2318 the thread is on no scheduler list at all. It exists simply by virtue of
2319 the slot having a state of STATE_FROZEN. */
2320 void thread_thaw(struct thread_entry *thread)
2322 #if NUM_CORES > 1
2323 /* Thread could be on any list and therefore on an interrupt accessible
2324 one - disable interrupts */
2325 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2326 #endif
2327 unsigned state = GET_THREAD_STATE(thread);
2329 if (state == STATE_FROZEN)
2331 const unsigned int core = CURRENT_CORE;
2332 #if NUM_CORES > 1
2333 if (thread->core != core)
2335 core_schedule_wakeup(thread);
2337 else
2338 #endif
2340 add_to_list_l(&cores[core].running, thread);
2343 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
2344 return;
2347 #if NUM_CORES > 1
2348 UNLOCK_THREAD(thread, state);
2349 set_irq_level(oldlevel);
2350 #endif
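/* Usage sketch (illustrative, reusing the hypothetical example_* names from
 * the create_thread() sketch): starting a thread that was created frozen
 * once the rest of the system is ready for it.
 *
 *     struct thread_entry *t =
 *         create_thread(example_func, example_stack, sizeof(example_stack),
 *                       CREATE_THREAD_FROZEN, "example"
 *                       IF_PRIO(, PRIORITY_USER_INTERFACE)
 *                       IF_COP(, CPU));
 *     ...
 *     thread_thaw(t);   // t goes on the running list for the first time
 */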
2353 /*---------------------------------------------------------------------------
2354 * Return the ID of the currently executing thread.
2355 *---------------------------------------------------------------------------
2357 struct thread_entry * thread_get_current(void)
2359 return cores[CURRENT_CORE].running;
2362 #if NUM_CORES > 1
2363 /*---------------------------------------------------------------------------
2364 * Switch the processor that the currently executing thread runs on.
2365 *---------------------------------------------------------------------------
2367 unsigned int switch_core(unsigned int new_core)
2369 const unsigned int core = CURRENT_CORE;
2370 struct thread_entry *current = cores[core].running;
2371 struct thread_entry *w;
2372 int oldlevel;
2374 /* Interrupts can access the lists that will be used - disable them */
2375 unsigned state = GET_THREAD_STATE(current);
2377 if (core == new_core)
2379 /* No change - just unlock everything and return same core */
2380 UNLOCK_THREAD(current, state);
2381 return core;
2384 /* Get us off the running list for the current core */
2385 remove_from_list_l(&cores[core].running, current);
2387 /* Stash return value (old core) in a safe place */
2388 current->retval = core;
2390 /* If a timeout hadn't yet been cleaned-up it must be removed now or
2391 * the other core will likely attempt a removal from the wrong list! */
2392 if (current->tmo.prev != NULL)
2394 remove_from_list_tmo(current);
2397 /* Change the core number for this thread slot */
2398 current->core = new_core;
2400 /* Do not use core_schedule_wakeup here since this will result in
2401 * the thread starting to run on the other core before being finished on
2402 * this one. Delay the wakeup list unlock to keep the other core stuck
2403 * until this thread is ready. */
2404 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2405 w = LOCK_LIST(&cores[new_core].waking);
2406 ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);
2408 /* Make a callback into device-specific code, unlock the wakeup list so
2409 * that execution may resume on the new core, unlock our slot and finally
2410 * restore the interrupt level */
2411 cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
2412 TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL;
2413 cores[core].blk_ops.irq_level = oldlevel;
2414 cores[core].blk_ops.list_p = &cores[new_core].waking;
2415 #if CONFIG_CORELOCK == CORELOCK_SWAP
2416 cores[core].blk_ops.state = STATE_RUNNING;
2417 cores[core].blk_ops.list_v = w;
2418 #endif
2420 #ifdef HAVE_PRIORITY_SCHEDULING
2421 current->priority_x = HIGHEST_PRIORITY;
2422 cores[core].highest_priority = LOWEST_PRIORITY;
2423 #endif
2424     /* Do the stack switching, cache maintenance and switch_thread call -
2425 requires native code */
2426 switch_thread_core(core, current);
2428 #ifdef HAVE_PRIORITY_SCHEDULING
2429 current->priority_x = LOWEST_PRIORITY;
2430 cores[current->core].highest_priority = LOWEST_PRIORITY;
2431 #endif
2433 /* Finally return the old core to caller */
2434 return current->retval;
2435 (void)state;
2437 #endif /* NUM_CORES > 1 */
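/* Usage sketch (illustrative, multicore builds only): hopping to the
 * coprocessor for a piece of work and then migrating back.
 *
 *     unsigned int old_core = switch_core(COP);   // continue on the COP
 *     ...                                         // this code runs on COP
 *     switch_core(old_core);                      // return to where we were
 */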
2439 /*---------------------------------------------------------------------------
2440 * Initialize threading API. This assumes interrupts are not yet enabled. On
2441 * multicore setups, no core is allowed to proceed until create_thread calls
2442 * are safe to perform.
2443 *---------------------------------------------------------------------------
2445 void init_threads(void)
2447 const unsigned int core = CURRENT_CORE;
2448 int slot;
2450 /* CPU will initialize first and then sleep */
2451 slot = find_empty_thread_slot();
2453 if (slot >= MAXTHREADS)
2455 /* WTF? There really must be a slot available at this stage.
2456      * This can fail if, for example, .bss isn't zeroed out by the loader
2457 * or threads is in the wrong section. */
2458 THREAD_PANICF("init_threads->no slot", NULL);
2461 cores[core].running = NULL;
2462 cores[core].timeout = NULL;
2463 thread_queue_init(&cores[core].waking);
2464 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2465 #if NUM_CORES > 1
2466 cores[core].blk_ops.flags = 0;
2467 #else
2468 cores[core].irq_level = STAY_IRQ_LEVEL;
2469 #endif
2470 threads[slot].name = main_thread_name;
2471 UNLOCK_THREAD_SET_STATE(&threads[slot], STATE_RUNNING); /* No sync worries yet */
2472 threads[slot].context.start = NULL; /* core's main thread already running */
2473 threads[slot].tmo.prev = NULL;
2474 threads[slot].queue = NULL;
2475 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2476 threads[slot].boosted = 0;
2477 #endif
2478 #if NUM_CORES > 1
2479 threads[slot].core = core;
2480 #endif
2481 #ifdef HAVE_PRIORITY_SCHEDULING
2482 threads[slot].priority = PRIORITY_USER_INTERFACE;
2483 threads[slot].priority_x = LOWEST_PRIORITY;
2484 cores[core].highest_priority = LOWEST_PRIORITY;
2485 #endif
2487 add_to_list_l(&cores[core].running, &threads[slot]);
2489 if (core == CPU)
2491 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2492 boosted_threads = 0;
2493 #endif
2494 threads[slot].stack = stackbegin;
2495 threads[slot].stack_size = (int)stackend - (int)stackbegin;
2496 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
2497 /* TODO: HAL interface for this */
2498 /* Wake up coprocessor and let it initialize kernel and threads */
2499 COP_CTL = PROC_WAKE;
2500 /* Sleep until finished */
2501 CPU_CTL = PROC_SLEEP;
2503 else
2505 /* Initial stack is the COP idle stack */
2506 threads[slot].stack = cop_idlestackbegin;
2507 threads[slot].stack_size = IDLE_STACK_SIZE;
2508 /* Mark COP initialized */
2509 cores[COP].blk_ops.flags = 0;
2510 /* Get COP safely primed inside switch_thread where it will remain
2511 * until a thread actually exists on it */
2512 CPU_CTL = PROC_WAKE;
2513 remove_thread(NULL);
2514 #endif /* NUM_CORES */
2518 /*---------------------------------------------------------------------------
2519 * Returns the maximum percentage of stack a thread ever used while running.
2520  * NOTE: Some large buffer allocations that don't use enough of the buffer to
2521 * overwrite stackptr[0] will not be seen.
2522 *---------------------------------------------------------------------------
2524 int thread_stack_usage(const struct thread_entry *thread)
2526 unsigned int *stackptr = thread->stack;
2527 int stack_words = thread->stack_size / sizeof (int);
2528 int i, usage = 0;
2530 for (i = 0; i < stack_words; i++)
2532 if (stackptr[i] != DEADBEEF)
2534 usage = ((stack_words - i) * 100) / stack_words;
2535 break;
2539 return usage;
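/* Usage sketch (illustrative): the result is a 0..100 percentage derived
 * from how much of the DEADBEEF fill written by create_thread() has been
 * overwritten.
 *
 *     int pct = thread_stack_usage(thread_get_current());
 */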
2542 #if NUM_CORES > 1
2543 /*---------------------------------------------------------------------------
2544 * Returns the maximum percentage of the core's idle stack ever used during
2545 * runtime.
2546 *---------------------------------------------------------------------------
2548 int idle_stack_usage(unsigned int core)
2550 unsigned int *stackptr = idle_stacks[core];
2551 int i, usage = 0;
2553 for (i = 0; i < IDLE_STACK_WORDS; i++)
2555 if (stackptr[i] != DEADBEEF)
2557 usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
2558 break;
2562 return usage;
2564 #endif
2566 /*---------------------------------------------------------------------------
2567 * Returns the current thread status. This is a snapshot for debugging and
2568 * does not do any slot synchronization so it could return STATE_BUSY.
2569 *---------------------------------------------------------------------------
2571 unsigned thread_get_status(const struct thread_entry *thread)
2573 return thread->state;
2576 /*---------------------------------------------------------------------------
2577 * Fills in the buffer with the specified thread's name. If the name is NULL,
2578  * empty, or the thread is in destruct state, a formatted ID is written
2579 * instead.
2580 *---------------------------------------------------------------------------
2582 void thread_get_name(char *buffer, int size,
2583 struct thread_entry *thread)
2585 if (size <= 0)
2586 return;
2588 *buffer = '\0';
2590 if (thread)
2592 /* Display thread name if one or ID if none */
2593 const char *name = thread->name;
2594 const char *fmt = "%s";
2595 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
2597 name = (const char *)thread;
2598 fmt = "%08lX";
2600 snprintf(buffer, size, fmt, name);
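/* Usage sketch (illustrative): formatting the current thread's name, or a
 * hexadecimal ID when it has none, into a small local buffer.
 *
 *     char buf[32];
 *     thread_get_name(buf, sizeof(buf), thread_get_current());
 */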