1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Ulf Ralberg
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
19 #include "config.h"
20 #include <stdbool.h>
21 #include "thread.h"
22 #include "panic.h"
23 #include "sprintf.h"
24 #include "system.h"
25 #include "kernel.h"
26 #include "cpu.h"
27 #include "string.h"
28 #ifdef RB_PROFILE
29 #include <profile.h>
30 #endif
32 /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
33 #ifdef DEBUG
34 #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
35 #else
36 #define THREAD_EXTRA_CHECKS 0
37 #endif
39 /**
40 * General locking order to guarantee progress. Order must be observed but
41 * not all stages are necessarily obligatory. Going from 1) to 3) directly is
42 * perfectly legal (an illustrative sketch follows the list below).
44 * 1) IRQ
45 * This is first because of the likelihood of having an interrupt occur that
46 * also accesses one of the objects farther down the list. Any non-blocking
47 * synchronization done may already have a lock on something during normal
48 * execution and if an interrupt handler running on the same processor as
49 * the one that has the resource locked were to attempt to access the
50 * resource, the interrupt handler would spin forever waiting for an unlock
51 * that will never happen. There is no danger if the interrupt occurs on
52 * a different processor because the one that has the lock will eventually
53 * unlock and the other processor's handler may proceed at that time. Not
54 * necessary when the resource in question is definitely not available to
55 * interrupt handlers.
57 * 2) Kernel Object
58 * 1) May be needed beforehand if the kernel object allows dual-use such as
59 * event queues. The kernel object must have a scheme to protect itself from
60 * access by another processor and is responsible for serializing the calls
61 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
62 * other. If a thread blocks on an object it must fill in the blk_ops members
63 * for its core to unlock _after_ the thread's context has been saved and the
64 * unlocking will be done in reverse order from this hierarchy.
66 * 3) Thread Slot
67 * This locks access to the thread's slot such that its state cannot be
68 * altered by another processor when a state change is in progress such as
69 * when it is in the process of going on a blocked list. An attempt to wake
70 * a thread while it is still blocking will likely desync its state with
71 * the other resources used for that state.
73 * 4) Lists
74 * Usually refers to a list (i.e. a queue) that a thread will block on, which
75 * belongs to some object and is shareable amongst multiple
76 * processors. Parts of the scheduler may access these lists without actually
77 * locking the kernel object, such as when a thread is blocked with a timeout
78 * (for example by calling queue_wait_w_tmo). Of course the kernel object also gets
79 * its lists locked when the thread blocks so that all object list access is
80 * synchronized. Failure to do so would corrupt the list links.
82 * 5) Core Lists
83 * These lists are specific to a particular processor core and are accessible
84 * by all processor cores and interrupt handlers. They are used when an
85 * operation may only be performed by the thread's own core in a normal
86 * execution context. The wakeup list is the prime example where a thread
87 * may be added by any means and the thread's own core will remove it from
88 * the wakeup list and put it on the running list (which is only ever
89 * accessible by its own processor).
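/* Illustrative sketch of the order above for a hypothetical blocking path.
 * The object type and function below are invented for illustration only and
 * are not part of the scheduler; unlocking happens in reverse order after
 * the thread's context has been saved. */
#if 0
struct example_object              /* hypothetical kernel object */
{
    struct corelock cl;            /* protects the object itself */
    struct thread_queue queue;     /* threads blocked on the object */
};

static void example_block_path(struct example_object *obj,
                               struct thread_entry *thread)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); /* 1) IRQ */
    corelock_lock(&obj->cl);                         /* 2) Kernel object */
    unsigned state = GET_THREAD_STATE(thread);       /* 3) Thread slot */
    add_to_list_l_locked(&obj->queue, thread);       /* 4) List */

    /* ... perform the state change, then unlock in reverse order ... */
    UNLOCK_THREAD(thread, state);
    corelock_unlock(&obj->cl);
    set_irq_level(oldlevel);
}
#endif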
91 #define DEADBEEF ((unsigned int)0xdeadbeef)
92 /* Cast to the machine int type, whose size could be < 4. */
93 struct core_entry cores[NUM_CORES] IBSS_ATTR;
94 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
96 static const char main_thread_name[] = "main";
97 extern int stackbegin[];
98 extern int stackend[];
100 /* core_sleep procedure to implement for any CPU to ensure an asynchronous wakeup
101 * never results in requiring a wait until the next tick (up to 10000uS!). Likely
102 * requires assembly and careful instruction ordering. Multicore requires
103 * carefully timed sections in order to have synchronization without locking of
104 * any sort. A generic C outline of these steps is sketched after the list below.
106 * 1) Disable all interrupts (FIQ and IRQ for ARM for instance)
107 * 2) Check *waking == NULL.
108 * 3) *waking not NULL? Goto step 7.
109 * 4) On multicore, stay awake if directed to do so by another. If so, goto step 7.
110 * 5) If processor requires, atomically reenable interrupts and perform step 6.
111 * 6) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
112 * goto step 8.
113 * 7) Reenable interrupts.
114 * 8) Exit procedure.
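/* Generic C outline of the eight steps above. The arch_*() helpers are
 * hypothetical stand-ins for the per-CPU assembly implemented below; this
 * sketches the required ordering, not a working implementation. */
#if 0
static inline void core_sleep_outline(IF_COP(unsigned int core,)
                                      struct thread_entry **waking)
{
    arch_disable_fiq_irq();                     /* 1) Disable interrupts */
    if (*waking == NULL                         /* 2)/3) Anything waking? */
#if NUM_CORES > 1
        && !arch_stay_awake_requested(core)     /* 4) Wake requested? */
#endif
       )
    {
        /* 5)/6) Sleep; some CPUs atomically reenable interrupts here */
        arch_sleep_core(IF_COP(core));
    }
    arch_enable_fiq_irq();                      /* 7) Reenable interrupts */
}                                               /* 8) Exit procedure */
#endif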
116 static inline void core_sleep(
117 IF_COP(unsigned int core,) struct thread_entry **waking)
118 __attribute__((always_inline));
120 static void check_tmo_threads(void)
121 __attribute__((noinline));
123 static inline void block_thread_on_l(
124 struct thread_queue *list, struct thread_entry *thread, unsigned state)
125 __attribute__((always_inline));
127 static inline void block_thread_on_l_no_listlock(
128 struct thread_entry **list, struct thread_entry *thread, unsigned state)
129 __attribute__((always_inline));
131 static inline void _block_thread_on_l(
132 struct thread_queue *list, struct thread_entry *thread,
133 unsigned state IF_SWCL(, const bool single))
134 __attribute__((always_inline));
136 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
137 struct thread_queue *list IF_SWCL(, const bool nolock))
138 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
140 IF_SWCL(static inline) void _block_thread(
141 struct thread_queue *list IF_SWCL(, const bool nolock))
142 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
144 static void add_to_list_tmo(struct thread_entry *thread)
145 __attribute__((noinline));
147 static void core_schedule_wakeup(struct thread_entry *thread)
148 __attribute__((noinline));
150 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
151 __attribute__((always_inline));
153 static inline void run_blocking_ops(
154 IF_COP_VOID(unsigned int core, struct thread_entry *thread))
155 __attribute__((always_inline));
157 static void thread_stkov(struct thread_entry *thread)
158 __attribute__((noinline));
160 static inline void store_context(void* addr)
161 __attribute__((always_inline));
163 static inline void load_context(const void* addr)
164 __attribute__((always_inline));
166 void switch_thread(struct thread_entry *old)
167 __attribute__((noinline));
170 /****************************************************************************
171 * Processor-specific section
174 #if defined(CPU_ARM)
175 /*---------------------------------------------------------------------------
176 * Start the thread running and terminate it if it returns
177 *---------------------------------------------------------------------------
179 static void start_thread(void) __attribute__((naked,used));
180 static void start_thread(void)
182 /* r0 = context */
183 asm volatile (
184 "ldr sp, [r0, #32] \n" /* Load initial sp */
185 "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
186 "mov r1, #0 \n" /* Mark thread as running */
187 "str r1, [r0, #40] \n"
188 #if NUM_CORES > 1
189 "ldr r0, =invalidate_icache \n" /* Invalidate this core's cache. */
190 "mov lr, pc \n" /* This could be the first entry into */
191 "bx r0 \n" /* plugin or codec code for this core. */
192 #endif
193 "mov lr, pc \n" /* Call thread function */
194 "bx r4 \n"
195 "mov r0, #0 \n" /* remove_thread(NULL) */
196 "ldr pc, =remove_thread \n"
197 ".ltorg \n" /* Dump constant pool */
198 ); /* No clobber list - new thread doesn't care */
201 /* For startup, place context pointer in r4 slot, start_thread pointer in r5
202 * slot, and thread function pointer in context.start. See load_context for
203 * what happens when thread is initially going to run. */
204 #define THREAD_STARTUP_INIT(core, thread, function) \
205 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
206 (thread)->context.r[1] = (unsigned int)start_thread, \
207 (thread)->context.start = (void *)function; })
209 /*---------------------------------------------------------------------------
210 * Store non-volatile context.
211 *---------------------------------------------------------------------------
213 static inline void store_context(void* addr)
215 asm volatile(
216 "stmia %0, { r4-r11, sp, lr } \n"
217 : : "r" (addr)
221 /*---------------------------------------------------------------------------
222 * Load non-volatile context.
223 *---------------------------------------------------------------------------
225 static inline void load_context(const void* addr)
227 asm volatile(
228 "ldr r0, [%0, #40] \n" /* Load start pointer */
229 "cmp r0, #0 \n" /* Check for NULL */
230 "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
231 "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
232 : : "r" (addr) : "r0" /* only! */
236 #if defined (CPU_PP)
238 #if NUM_CORES > 1
239 extern int cpu_idlestackbegin[];
240 extern int cpu_idlestackend[];
241 extern int cop_idlestackbegin[];
242 extern int cop_idlestackend[];
243 static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
245 [CPU] = cpu_idlestackbegin,
246 [COP] = cop_idlestackbegin
249 #if CONFIG_CPU == PP5002
250 /* Bytes to emulate the PP502x mailbox bits */
251 struct core_semaphores
253 volatile uint8_t intend_wake; /* 00h */
254 volatile uint8_t stay_awake; /* 01h */
255 volatile uint8_t intend_sleep; /* 02h */
256 volatile uint8_t unused; /* 03h */
259 static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR;
260 #endif
262 #endif /* NUM_CORES */
264 #if CONFIG_CORELOCK == SW_CORELOCK
265 /* Software core locks using Peterson's mutual exclusion algorithm */
267 /*---------------------------------------------------------------------------
268 * Initialize the corelock structure.
269 *---------------------------------------------------------------------------
271 void corelock_init(struct corelock *cl)
273 memset(cl, 0, sizeof (*cl));
276 #if 1 /* Assembly locks to minimize overhead */
277 /*---------------------------------------------------------------------------
278 * Wait for the corelock to become free and acquire it when it does.
279 *---------------------------------------------------------------------------
281 void corelock_lock(struct corelock *cl) __attribute__((naked));
282 void corelock_lock(struct corelock *cl)
284 asm volatile (
285 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
286 "ldrb r1, [r1] \n"
287 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
288 "and r2, r1, #1 \n" /* r2 = othercore */
289 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
290 "1: \n"
291 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 0 ? */
292 "cmp r3, #0 \n"
293 "ldrneb r3, [r0, #2] \n" /* || cl->turn == core ? */
294 "cmpne r3, r1, lsr #7 \n"
295 "bxeq lr \n" /* yes? lock acquired */
296 "b 1b \n" /* keep trying */
297 : : "i"(&PROCESSOR_ID)
299 (void)cl;
302 /*---------------------------------------------------------------------------
303 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
304 *---------------------------------------------------------------------------
306 int corelock_try_lock(struct corelock *cl) __attribute__((naked));
307 int corelock_try_lock(struct corelock *cl)
309 asm volatile (
310 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
311 "ldrb r1, [r1] \n"
312 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
313 "and r2, r1, #1 \n" /* r2 = othercore */
314 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
315 "1: \n"
316 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 0 ? */
317 "cmp r3, #0 \n"
318 "ldrneb r3, [r0, #2] \n" /* || cl->turn == core? */
319 "cmpne r3, r1, lsr #7 \n"
320 "moveq r0, #1 \n" /* yes? lock acquired */
321 "bxeq lr \n"
322 "mov r2, #0 \n" /* cl->myl[core] = 0 */
323 "strb r2, [r0, r1, lsr #7] \n"
324 "mov r0, r2 \n"
325 "bx lr \n" /* acquisition failed */
326 : : "i"(&PROCESSOR_ID)
329 return 0;
330 (void)cl;
333 /*---------------------------------------------------------------------------
334 * Release ownership of the corelock
335 *---------------------------------------------------------------------------
337 void corelock_unlock(struct corelock *cl) __attribute__((naked));
338 void corelock_unlock(struct corelock *cl)
340 asm volatile (
341 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
342 "ldrb r1, [r1] \n"
343 "mov r2, #0 \n" /* cl->myl[core] = 0 */
344 "strb r2, [r0, r1, lsr #7] \n"
345 "bx lr \n"
346 : : "i"(&PROCESSOR_ID)
348 (void)cl;
350 #else /* C versions for reference */
351 /*---------------------------------------------------------------------------
352 * Wait for the corelock to become free and acquire it when it does.
353 *---------------------------------------------------------------------------
355 void corelock_lock(struct corelock *cl)
357 const unsigned int core = CURRENT_CORE;
358 const unsigned int othercore = 1 - core;
360 cl->myl[core] = core;
361 cl->turn = othercore;
363 for (;;)
365 if (cl->myl[othercore] == 0 || cl->turn == core)
366 break;
370 /*---------------------------------------------------------------------------
371 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
372 *---------------------------------------------------------------------------
374 int corelock_try_lock(struct corelock *cl)
376 const unsigned int core = CURRENT_CORE;
377 const unsigned int othercore = 1 - core;
379 cl->myl[core] = core;
380 cl->turn = othercore;
382 if (cl->myl[othercore] == 0 || cl->turn == core)
384 return 1;
387 cl->myl[core] = 0;
388 return 0;
391 /*---------------------------------------------------------------------------
392 * Release ownership of the corelock
393 *---------------------------------------------------------------------------
395 void corelock_unlock(struct corelock *cl)
397 cl->myl[CURRENT_CORE] = 0;
399 #endif /* ASM / C selection */
401 #endif /* CONFIG_CORELOCK == SW_CORELOCK */
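/* Minimal usage sketch for the corelock API above. The counter and its lock
 * are illustrative only and do not exist elsewhere in the firmware. */
#if 0
static struct corelock counter_cl;   /* corelock_init(&counter_cl) once first */
static volatile int shared_counter;

static void shared_counter_add(int n)
{
    corelock_lock(&counter_cl);      /* spin until this core owns the lock */
    shared_counter += n;             /* critical section */
    corelock_unlock(&counter_cl);    /* release for the other core */
}
#endif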
403 /*---------------------------------------------------------------------------
404 * Put core in a power-saving state if waking list wasn't repopulated and if
405 * no other core requested a wakeup for it to perform a task.
406 *---------------------------------------------------------------------------
408 #if NUM_CORES == 1
409 /* Shared single-core build debugging version */
410 static inline void core_sleep(struct thread_entry **waking)
412 set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
413 if (*waking == NULL)
415 PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
416 nop; nop; nop;
418 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
420 #elif defined (CPU_PP502x)
421 static inline void core_sleep(unsigned int core,
422 struct thread_entry **waking)
424 #if 1
425 /* Disabling IRQ and FIQ is important to making the fixed-time sequence
426 * non-interruptable */
427 asm volatile (
428 "mrs r2, cpsr \n" /* Disable IRQ, FIQ */
429 "orr r2, r2, #0xc0 \n"
430 "msr cpsr_c, r2 \n"
431 "mov r0, #4 \n" /* r0 = 0x4 << core */
432 "mov r0, r0, lsl %[c] \n"
433 "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
434 "ldr r1, [%[waking]] \n" /* *waking == NULL ? */
435 "cmp r1, #0 \n"
436 "ldreq r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
437 "tsteq r1, r0, lsl #2 \n"
438 "moveq r1, #0x80000000 \n" /* Then sleep */
439 "streq r1, [%[ctl], %[c], lsl #2] \n"
440 "moveq r1, #0 \n" /* Clear control reg */
441 "streq r1, [%[ctl], %[c], lsl #2] \n"
442 "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
443 "str r1, [%[mbx], #8] \n"
444 "1: \n" /* Wait for wake procedure to finish */
445 "ldr r1, [%[mbx], #0] \n"
446 "tst r1, r0, lsr #2 \n"
447 "bne 1b \n"
448 "bic r2, r2, #0xc0 \n" /* Enable interrupts */
449 "msr cpsr_c, r2 \n"
451 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
452 [waking]"r"(waking), [c]"r"(core)
453 : "r0", "r1", "r2");
454 #else /* C version for reference */
455 /* Disable IRQ, FIQ */
456 set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
458 /* Signal intent to sleep */
459 MBX_MSG_SET = 0x4 << core;
461 /* Something waking or other processor intends to wake us? */
462 if (*waking == NULL && (MBX_MSG_STAT & (0x10 << core)) == 0)
464 PROC_CTL(core) = PROC_SLEEP; nop; /* Snooze */
465 PROC_CTL(core) = 0; /* Clear control reg */
468 /* Signal wake - clear wake flag */
469 MBX_MSG_CLR = 0x14 << core;
471 /* Wait for other processor to finish wake procedure */
472 while (MBX_MSG_STAT & (0x1 << core));
474 /* Enable IRQ, FIQ */
475 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
476 #endif /* ASM/C selection */
478 #elif CONFIG_CPU == PP5002
479 /* PP5002 has no mailboxes - emulate using bytes */
480 static inline void core_sleep(unsigned int core,
481 struct thread_entry **waking)
483 #if 1
484 asm volatile (
485 "mrs r1, cpsr \n" /* Disable IRQ, FIQ */
486 "orr r1, r1, #0xc0 \n"
487 "msr cpsr_c, r1 \n"
488 "mov r0, #1 \n" /* Signal intent to sleep */
489 "strb r0, [%[sem], #2] \n"
490 "ldr r0, [%[waking]] \n" /* *waking == NULL? */
491 "cmp r0, #0 \n"
492 "ldreqb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
493 "cmpeq r0, #0 \n"
494 "moveq r0, #0xca \n" /* Then sleep */
495 "streqb r0, [%[ctl], %[c], lsl #2] \n"
496 "nop \n" /* nop's needed because of pipeline */
497 "nop \n"
498 "nop \n"
499 "mov r0, #0 \n" /* Clear stay_awake and sleep intent */
500 "strb r0, [%[sem], #1] \n"
501 "strb r0, [%[sem], #2] \n"
502 "1: \n" /* Wait for wake procedure to finish */
503 "ldrb r0, [%[sem], #0] \n"
504 "cmp r0, #0 \n"
505 "bne 1b \n"
506 "bic r1, r1, #0xc0 \n" /* Enable interrupts */
507 "msr cpsr_c, r1 \n"
509 : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
510 [waking]"r"(waking), [ctl]"r"(&PROC_CTL(CPU))
511 : "r0", "r1"
513 #else /* C version for reference */
514 /* Disable IRQ, FIQ */
515 set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
517 /* Signal intent to sleep */
518 core_semaphores[core].intend_sleep = 1;
520 /* Something waking or other processor intends to wake us? */
521 if (*waking == NULL && core_semaphores[core].stay_awake == 0)
523 PROC_CTL(core) = PROC_SLEEP; /* Snooze */
524 nop; nop; nop;
527 /* Signal wake - clear wake flag */
528 core_semaphores[core].stay_awake = 0;
529 core_semaphores[core].intend_sleep = 0;
531 /* Wait for other processor to finish wake procedure */
532 while (core_semaphores[core].intend_wake != 0);
534 /* Enable IRQ, FIQ */
535 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
536 #endif /* ASM/C selection */
538 #endif /* CPU type */
540 /*---------------------------------------------------------------------------
541 * Wake another processor core that is sleeping, or prevent it from doing so
542 * if it was about to sleep. FIQ and IRQ should be disabled before calling.
543 *---------------------------------------------------------------------------
545 #if NUM_CORES == 1
546 /* Shared single-core build debugging version */
547 void core_wake(void)
549 /* No wakey - core already wakey */
551 #elif defined (CPU_PP502x)
552 void core_wake(unsigned int othercore)
554 #if 1
555 /* avoid r0 since that contains othercore */
556 asm volatile (
557 "mrs r3, cpsr \n" /* Disable IRQ */
558 "orr r1, r3, #0x80 \n"
559 "msr cpsr_c, r1 \n"
560 "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
561 "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
562 "str r2, [%[mbx], #4] \n"
563 "1: \n" /* If it intends to sleep, let it first */
564 "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
565 "eor r1, r1, #0xc \n"
566 "tst r1, r2, lsr #2 \n"
567 "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
568 "tsteq r1, #0x80000000 \n"
569 "beq 1b \n" /* Wait for sleep or wake */
570 "tst r1, #0x80000000 \n" /* If sleeping, wake it */
571 "movne r1, #0x0 \n"
572 "strne r1, [%[ctl], %[oc], lsl #2] \n"
573 "mov r1, r2, lsr #4 \n"
574 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
575 "msr cpsr_c, r3 \n" /* Restore int status */
577 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
578 [oc]"r"(othercore)
579 : "r1", "r2", "r3");
580 #else /* C version for reference */
581 /* Disable interrupts - avoid reentrancy from the tick */
582 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
584 /* Signal intent to wake other processor - set stay awake */
585 MBX_MSG_SET = 0x11 << othercore;
587 /* If it intends to sleep, wait until it does or aborts */
588 while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
589 (PROC_CTL(othercore) & PROC_SLEEP) == 0);
591 /* If sleeping, wake it up */
592 if (PROC_CTL(othercore) & PROC_SLEEP)
593 PROC_CTL(othercore) = 0;
595 /* Done with wake procedure */
596 MBX_MSG_CLR = 0x1 << othercore;
597 set_irq_level(oldlevel);
598 #endif /* ASM/C selection */
600 #elif CONFIG_CPU == PP5002
601 /* PP5002 has no mailboxes - emulate using bytes */
602 void core_wake(unsigned int othercore)
604 #if 1
605 /* avoid r0 since that contains othercore */
606 asm volatile (
607 "mrs r3, cpsr \n" /* Disable IRQ */
608 "orr r1, r3, #0x80 \n"
609 "msr cpsr_c, r1 \n"
610 "mov r1, #1 \n" /* Signal intent to wake other core */
611 "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
612 "strh r1, [%[sem], #0] \n"
613 "mov r2, #0x8000 \n"
614 "1: \n" /* If it intends to sleep, let it first */
615 "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
616 "cmp r1, #1 \n"
617 "ldr r1, [%[st]] \n" /* && not sleeping ? */
618 "tsteq r1, r2, lsr %[oc] \n"
619 "beq 1b \n" /* Wait for sleep or wake */
620 "tst r1, r2, lsr %[oc] \n"
621 "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
622 "movne r1, #0xce \n"
623 "strneb r1, [r2, %[oc], lsl #2] \n"
624 "mov r1, #0 \n" /* Done with wake procedure */
625 "strb r1, [%[sem], #0] \n"
626 "msr cpsr_c, r3 \n" /* Restore int status */
628 : [sem]"r"(&core_semaphores[othercore]),
629 [st]"r"(&PROC_STAT),
630 [oc]"r"(othercore)
631 : "r1", "r2", "r3"
633 #else /* C version for reference */
634 /* Disable interrupts - avoid reentrancy from the tick */
635 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
637 /* Signal intent to wake other processor - set stay awake */
638 core_semaphores[othercore].intend_wake = 1;
639 core_semaphores[othercore].stay_awake = 1;
641 /* If it intends to sleep, wait until it does or aborts */
642 while (core_semaphores[othercore].intend_sleep != 0 &&
643 (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
645 /* If sleeping, wake it up */
646 if (PROC_STAT & PROC_SLEEPING(othercore))
647 PROC_CTL(othercore) = PROC_WAKE;
649 /* Done with wake procedure */
650 core_semaphores[othercore].intend_wake = 0;
651 set_irq_level(oldlevel);
652 #endif /* ASM/C selection */
654 #endif /* CPU type */
656 #if NUM_CORES > 1
657 /*---------------------------------------------------------------------------
658 * Switches to a stack that always resides in the Rockbox core.
660 * Needed when a thread suicides on a core other than the main CPU since the
661 * stack used when idling is the stack of the last thread to run. This stack
662 * may not reside in the core in which case the core will continue to use a
663 * stack from an unloaded module until another thread runs on it.
664 *---------------------------------------------------------------------------
666 static inline void switch_to_idle_stack(const unsigned int core)
668 asm volatile (
669 "str sp, [%0] \n" /* save original stack pointer on idle stack */
670 "mov sp, %0 \n" /* switch stacks */
671 : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
672 (void)core;
675 /*---------------------------------------------------------------------------
676 * Perform core switch steps that need to take place inside switch_thread.
678 * These steps must take place before changing the processor and after
679 * having entered switch_thread since switch_thread may not do a normal return
680 * because the stack being used for anything the compiler saved will not belong
681 * to the thread's destination core and it may have been recycled for other
682 * purposes by the time a normal context load has taken place. switch_thread
683 * will also clobber anything stashed in the thread's context or stored in the
684 * nonvolatile registers if it is saved there before the call since the
685 * compiler's order of operations cannot be known for certain.
687 static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
689 /* Flush our data to ram */
690 flush_icache();
691 /* Stash thread in r4 slot */
692 thread->context.r[0] = (unsigned int)thread;
693 /* Stash restart address in r5 slot */
694 thread->context.r[1] = (unsigned int)thread->context.start;
695 /* Save sp in context.sp while still running on old core */
696 thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
699 /*---------------------------------------------------------------------------
700 * Machine-specific helper function for switching the processor a thread is
701 * running on. Basically, the thread suicides on the departing core and is
702 * reborn on the destination. Were it not for gcc's ill-behavior regarding
703 * naked functions written in C where it actually clobbers non-volatile
704 * registers before the intended prologue code, this would all be much
705 * simpler. Generic setup is done in switch_core itself.
708 /*---------------------------------------------------------------------------
709 * This actually performs the core switch.
711 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
712 __attribute__((naked));
713 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
715 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
716 * Stack access also isn't permitted until restoring the original stack and
717 * context. */
718 asm volatile (
719 "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */
720 "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
721 "ldr r2, [r2, r0, lsl #2] \n"
722 "add r2, r2, %0*4 \n"
723 "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
724 "mov sp, r2 \n" /* switch stacks */
725 "adr r2, 1f \n" /* r2 = new core restart address */
726 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
727 "mov r0, r1 \n" /* switch_thread(thread) */
728 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
729 "1: \n"
730 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
731 "mov r1, #0 \n" /* Clear start address */
732 "str r1, [r0, #40] \n"
733 "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */
734 "mov lr, pc \n"
735 "bx r0 \n"
736 "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */
737 ".ltorg \n" /* Dump constant pool */
738 : : "i"(IDLE_STACK_WORDS)
740 (void)core; (void)thread;
742 #endif /* NUM_CORES */
744 #elif CONFIG_CPU == S3C2440
746 /*---------------------------------------------------------------------------
747 * Put core in a power-saving state if waking list wasn't repopulated.
748 *---------------------------------------------------------------------------
750 static inline void core_sleep(struct thread_entry **waking)
752 /* FIQ also changes the CLKCON register so FIQ must be disabled
753 when changing it here */
754 asm volatile (
755 "mrs r0, cpsr \n" /* Disable IRQ, FIQ */
756 "orr r0, r0, #0xc0 \n"
757 "msr cpsr_c, r0 \n"
758 "ldr r1, [%0] \n" /* Check *waking */
759 "cmp r1, #0 \n"
760 "bne 2f \n" /* != NULL -> exit */
761 "bic r0, r0, #0xc0 \n" /* Prepare IRQ, FIQ enable */
762 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
763 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
764 "orr r2, r2, #4 \n"
765 "str r2, [r1, #0xc] \n"
766 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
767 "mov r3, #0 \n" /* wait for IDLE */
768 "1: \n"
769 "add r3, r3, #1 \n"
770 "cmp r3, #10 \n"
771 "bne 1b \n"
772 "orr r0, r0, #0xc0 \n" /* Disable IRQ, FIQ */
773 "msr cpsr_c, r0 \n"
774 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
775 "bic r2, r2, #4 \n"
776 "str r2, [r1, #0xc] \n"
777 "2: \n"
778 "bic r0, r0, #0xc0 \n" /* Enable IRQ, FIQ */
779 "msr cpsr_c, r0 \n"
780 : : "r"(waking) : "r0", "r1", "r2", "r3");
782 #elif defined(CPU_TCC77X)
783 static inline void core_sleep(struct thread_entry **waking)
785 #warning TODO: Implement core_sleep
787 #else
788 static inline void core_sleep(struct thread_entry **waking)
790 (void) waking;
791 #warning core_sleep not implemented, battery life will be decreased
793 #endif /* CONFIG_CPU == */
795 #elif defined(CPU_COLDFIRE)
796 /*---------------------------------------------------------------------------
797 * Start the thread running and terminate it if it returns
798 *---------------------------------------------------------------------------
800 void start_thread(void); /* Provide C access to ASM label */
801 static void __start_thread(void) __attribute__((used));
802 static void __start_thread(void)
804 /* a0=macsr, a1=context */
805 asm volatile (
806 "start_thread: \n" /* Start here - no naked attribute */
807 "move.l %a0, %macsr \n" /* Set initial mac status reg */
808 "lea.l 48(%a1), %a1 \n"
809 "move.l (%a1)+, %sp \n" /* Set initial stack */
810 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
811 "clr.l (%a1) \n" /* Mark thread running */
812 "jsr (%a2) \n" /* Call thread function */
813 "clr.l -(%sp) \n" /* remove_thread(NULL) */
814 "jsr remove_thread \n"
818 /* Set EMAC unit to fractional mode with saturation for each new thread,
819 * since that's what'll be the most useful for most things which the dsp
820 * will do. Codecs should still initialize their preferred modes
821 * explicitly. Context pointer is placed in d2 slot and start_thread
822 * pointer in d3 slot. thread function pointer is placed in context.start.
823 * See load_context for what happens when thread is initially going to
824 * run.
826 #define THREAD_STARTUP_INIT(core, thread, function) \
827 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
828 (thread)->context.d[0] = (unsigned int)&(thread)->context, \
829 (thread)->context.d[1] = (unsigned int)start_thread, \
830 (thread)->context.start = (void *)(function); })
832 /*---------------------------------------------------------------------------
833 * Store non-volatile context.
834 *---------------------------------------------------------------------------
836 static inline void store_context(void* addr)
838 asm volatile (
839 "move.l %%macsr,%%d0 \n"
840 "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
841 : : "a" (addr) : "d0" /* only! */
845 /*---------------------------------------------------------------------------
846 * Load non-volatile context.
847 *---------------------------------------------------------------------------
849 static inline void load_context(const void* addr)
851 asm volatile (
852 "move.l 52(%0), %%d0 \n" /* Get start address */
853 "beq.b 1f \n" /* NULL -> already running */
854 "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
855 "jmp (%%a2) \n" /* Start the thread */
856 "1: \n"
857 "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
858 "move.l %%d0, %%macsr \n"
859 : : "a" (addr) : "d0" /* only! */
863 /*---------------------------------------------------------------------------
864 * Put core in a power-saving state if waking list wasn't repopulated.
865 *---------------------------------------------------------------------------
867 static inline void core_sleep(struct thread_entry **waking)
869 asm volatile (
870 "moveq.l %1, %%d0 \n" /* Disable interrupts (not audio DMA) */
871 "lsl.l #8, %%d0 \n"
872 "move.w %%d0, %%sr \n"
873 "tst.l (%0) \n" /* Check *waking */
874 "beq.b 1f \n" /* != NULL -> exit */
875 "moveq.l #0x20, %%d0 \n" /* Enable interrupts */
876 "lsl.l #8, %%d0 \n"
877 "move.w %%d0, %%sr \n"
878 ".word 0x51fb \n" /* tpf.l - eat stop instruction */
879 "1: \n"
880 "stop #0x2000 \n" /* Supervisor mode, interrupts enabled
881 upon wakeup */
882 : : "a"(waking), "i"((0x2000 | HIGHEST_IRQ_LEVEL) >> 8) : "d0"
886 #elif CONFIG_CPU == SH7034
887 /*---------------------------------------------------------------------------
888 * Start the thread running and terminate it if it returns
889 *---------------------------------------------------------------------------
891 void start_thread(void); /* Provide C access to ASM label */
892 static void __start_thread(void) __attribute__((used));
893 static void __start_thread(void)
895 /* r8 = context */
896 asm volatile (
897 "_start_thread: \n" /* Start here - no naked attribute */
898 "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
899 "mov.l @(28, r8), r15 \n" /* Set initial sp */
900 "mov #0, r1 \n" /* Start the thread */
901 "jsr @r0 \n"
902 "mov.l r1, @(36, r8) \n" /* Clear start address */
903 "mov.l 1f, r0 \n" /* remove_thread(NULL) */
904 "jmp @r0 \n"
905 "mov #0, r4 \n"
906 "1: \n"
907 ".long _remove_thread \n"
911 /* Place context pointer in r8 slot, function pointer in r9 slot, and
912 * start_thread pointer in context_start */
913 #define THREAD_STARTUP_INIT(core, thread, function) \
914 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
915 (thread)->context.r[1] = (unsigned int)(function), \
916 (thread)->context.start = (void*)start_thread; })
918 /*---------------------------------------------------------------------------
919 * Store non-volatile context.
920 *---------------------------------------------------------------------------
922 static inline void store_context(void* addr)
924 asm volatile (
925 "add #36, %0 \n" /* Start at last reg. By the time routine */
926 "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
927 "mov.l r15,@-%0 \n"
928 "mov.l r14,@-%0 \n"
929 "mov.l r13,@-%0 \n"
930 "mov.l r12,@-%0 \n"
931 "mov.l r11,@-%0 \n"
932 "mov.l r10,@-%0 \n"
933 "mov.l r9, @-%0 \n"
934 "mov.l r8, @-%0 \n"
935 : : "r" (addr)
939 /*---------------------------------------------------------------------------
940 * Load non-volatile context.
941 *---------------------------------------------------------------------------
943 static inline void load_context(const void* addr)
945 asm volatile (
946 "mov.l @(36, %0), r0 \n" /* Get start address */
947 "tst r0, r0 \n"
948 "bt .running \n" /* NULL -> already running */
949 "jmp @r0 \n" /* r8 = context */
950 ".running: \n"
951 "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
952 "mov.l @%0+, r9 \n"
953 "mov.l @%0+, r10 \n"
954 "mov.l @%0+, r11 \n"
955 "mov.l @%0+, r12 \n"
956 "mov.l @%0+, r13 \n"
957 "mov.l @%0+, r14 \n"
958 "mov.l @%0+, r15 \n"
959 "lds.l @%0+, pr \n"
960 : : "r" (addr) : "r0" /* only! */
964 /*---------------------------------------------------------------------------
965 * Put core in a power-saving state if waking list wasn't repopulated.
966 *---------------------------------------------------------------------------
968 static inline void core_sleep(struct thread_entry **waking)
970 asm volatile (
971 "mov %2, r1 \n" /* Disable interrupts */
972 "ldc r1, sr \n"
973 "mov.l @%1, r1 \n" /* Check *waking */
974 "tst r1, r1 \n"
975 "bf 1f \n" /* *waking != NULL ? exit */
976 "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
977 "mov #0, r1 \n" /* Enable interrupts */
978 "ldc r1, sr \n" /* Following instruction cannot be interrupted */
979 "bra 2f \n" /* bra and sleep are executed at once */
980 "sleep \n" /* Execute standby */
981 "1: \n"
982 "mov #0, r1 \n" /* Enable interrupts */
983 "ldc r1, sr \n"
984 "2: \n"
986 : "z"(&SBYCR-GBR), "r"(waking), "i"(HIGHEST_IRQ_LEVEL)
987 : "r1");
990 #endif /* CONFIG_CPU == */
993 * End Processor-specific section
994 ***************************************************************************/
996 #if THREAD_EXTRA_CHECKS
997 static void thread_panicf(const char *msg, struct thread_entry *thread)
999 #if NUM_CORES > 1
1000 const unsigned int core = thread->core;
1001 #endif
1002 static char name[32];
1003 thread_get_name(name, 32, thread);
1004 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
1006 static void thread_stkov(struct thread_entry *thread)
1008 thread_panicf("Stkov", thread);
1010 #define THREAD_PANICF(msg, thread) \
1011 thread_panicf(msg, thread)
1012 #define THREAD_ASSERT(exp, msg, thread) \
1013 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
1014 #else
1015 static void thread_stkov(struct thread_entry *thread)
1017 #if NUM_CORES > 1
1018 const unsigned int core = thread->core;
1019 #endif
1020 static char name[32];
1021 thread_get_name(name, 32, thread);
1022 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
1024 #define THREAD_PANICF(msg, thread)
1025 #define THREAD_ASSERT(exp, msg, thread)
1026 #endif /* THREAD_EXTRA_CHECKS */
1028 /*---------------------------------------------------------------------------
1029 * Lock a list pointer and return its value
1030 *---------------------------------------------------------------------------
1032 #if CONFIG_CORELOCK == SW_CORELOCK
1033 /* Separate locking function versions */
1035 /* Thread locking */
1036 #define GET_THREAD_STATE(thread) \
1037 ({ corelock_lock(&(thread)->cl); (thread)->state; })
1038 #define TRY_GET_THREAD_STATE(thread) \
1039 ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
1040 #define UNLOCK_THREAD(thread, state) \
1041 ({ corelock_unlock(&(thread)->cl); })
1042 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1043 ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })
1045 /* List locking */
1046 #define LOCK_LIST(tqp) \
1047 ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
1048 #define UNLOCK_LIST(tqp, mod) \
1049 ({ corelock_unlock(&(tqp)->cl); })
1050 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1051 ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })
1053 /* Select the queue pointer directly */
1054 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1055 ({ add_to_list_l(&(tqp)->queue, (thread)); })
1056 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1057 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
1059 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1060 /* Native swap/exchange versions */
1062 /* Thread locking */
1063 #define GET_THREAD_STATE(thread) \
1064 ({ unsigned _s; \
1065 while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
1066 _s; })
1067 #define TRY_GET_THREAD_STATE(thread) \
1068 ({ xchg8(&(thread)->state, STATE_BUSY); })
1069 #define UNLOCK_THREAD(thread, _state) \
1070 ({ (thread)->state = (_state); })
1071 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1072 ({ (thread)->state = (_state); })
1074 /* List locking */
1075 #define LOCK_LIST(tqp) \
1076 ({ struct thread_entry *_l; \
1077 while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
1078 _l; })
1079 #define UNLOCK_LIST(tqp, mod) \
1080 ({ (tqp)->queue = (mod); })
1081 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1082 ({ (tqp)->queue = (mod); })
1084 /* Select the local queue pointer copy returned from LOCK_LIST */
1085 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1086 ({ add_to_list_l(&(tc), (thread)); })
1087 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1088 ({ remove_from_list_l(&(tc), (thread)); })
1090 #else
1091 /* Single-core/non-locked versions */
1093 /* Threads */
1094 #define GET_THREAD_STATE(thread) \
1095 ({ (thread)->state; })
1096 #define UNLOCK_THREAD(thread, _state)
1097 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1098 ({ (thread)->state = (_state); })
1100 /* Lists */
1101 #define LOCK_LIST(tqp) \
1102 ({ (tqp)->queue; })
1103 #define UNLOCK_LIST(tqp, mod)
1104 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1105 ({ (tqp)->queue = (mod); })
1107 /* Select the queue pointer directly */
1108 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1109 ({ add_to_list_l(&(tqp)->queue, (thread)); })
1110 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1111 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
1113 #endif /* locking selection */
1115 #if THREAD_EXTRA_CHECKS
1116 /*---------------------------------------------------------------------------
1117 * Lock the thread slot to obtain the state and then unlock it. Waits for
1118 * it not to be busy. Used for debugging.
1119 *---------------------------------------------------------------------------
1121 static unsigned peek_thread_state(struct thread_entry *thread)
1123 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1124 unsigned state = GET_THREAD_STATE(thread);
1125 UNLOCK_THREAD(thread, state);
1126 set_irq_level(oldlevel);
1127 return state;
1129 #endif /* THREAD_EXTRA_CHECKS */
1131 /*---------------------------------------------------------------------------
1132 * Adds a thread to a list of threads using "insert last". Uses the "l"
1133 * links.
1134 *---------------------------------------------------------------------------
1136 static void add_to_list_l(struct thread_entry **list,
1137 struct thread_entry *thread)
1139 struct thread_entry *l = *list;
1141 if (l == NULL)
1143 /* Insert into unoccupied list */
1144 thread->l.next = thread;
1145 thread->l.prev = thread;
1146 *list = thread;
1147 return;
1150 /* Insert last */
1151 thread->l.next = l;
1152 thread->l.prev = l->l.prev;
1153 thread->l.prev->l.next = thread;
1154 l->l.prev = thread;
1156 /* Insert next
1157 thread->l.next = l->l.next;
1158 thread->l.prev = l;
1159 thread->l.next->l.prev = thread;
1160 l->l.next = thread;
1164 /*---------------------------------------------------------------------------
1165 * Locks a list, adds the thread entry and unlocks the list on multicore.
1166 * Defined as add_to_list_l on single-core.
1167 *---------------------------------------------------------------------------
1169 #if NUM_CORES > 1
1170 static void add_to_list_l_locked(struct thread_queue *tq,
1171 struct thread_entry *thread)
1173 struct thread_entry *t = LOCK_LIST(tq);
1174 ADD_TO_LIST_L_SELECT(t, tq, thread);
1175 UNLOCK_LIST(tq, t);
1176 (void)t;
1178 #else
1179 #define add_to_list_l_locked(tq, thread) \
1180 add_to_list_l(&(tq)->queue, (thread))
1181 #endif
1183 /*---------------------------------------------------------------------------
1184 * Removes a thread from a list of threads. Uses the "l" links.
1185 *---------------------------------------------------------------------------
1187 static void remove_from_list_l(struct thread_entry **list,
1188 struct thread_entry *thread)
1190 struct thread_entry *prev, *next;
1192 next = thread->l.next;
1194 if (thread == next)
1196 /* The only item */
1197 *list = NULL;
1198 return;
1201 if (thread == *list)
1203 /* List becomes next item */
1204 *list = next;
1207 prev = thread->l.prev;
1209 /* Fix links to jump over the removed entry. */
1210 prev->l.next = next;
1211 next->l.prev = prev;
1214 /*---------------------------------------------------------------------------
1215 * Locks a list, removes the thread entry and unlocks the list on multicore.
1216 * Defined as remove_from_list_l on single-core.
1217 *---------------------------------------------------------------------------
1219 #if NUM_CORES > 1
1220 static void remove_from_list_l_locked(struct thread_queue *tq,
1221 struct thread_entry *thread)
1223 struct thread_entry *t = LOCK_LIST(tq);
1224 REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
1225 UNLOCK_LIST(tq, t);
1226 (void)t;
1228 #else
1229 #define remove_from_list_l_locked(tq, thread) \
1230 remove_from_list_l(&(tq)->queue, (thread))
1231 #endif
1233 /*---------------------------------------------------------------------------
1234 * Add a thread to the core's timeout list by linking the pointers in its
1235 * tmo structure.
1236 *---------------------------------------------------------------------------
1238 static void add_to_list_tmo(struct thread_entry *thread)
1240 /* Insert first */
1241 struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;
1243 thread->tmo.prev = thread;
1244 thread->tmo.next = t;
1246 if (t != NULL)
1248 /* Fix second item's prev pointer to point to this thread */
1249 t->tmo.prev = thread;
1252 cores[IF_COP_CORE(thread->core)].timeout = thread;
1255 /*---------------------------------------------------------------------------
1256 * Remove a thread from the core's timeout list by unlinking the pointers in
1257 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
1258 * is cancelled.
1259 *---------------------------------------------------------------------------
1261 static void remove_from_list_tmo(struct thread_entry *thread)
1263 struct thread_entry *next = thread->tmo.next;
1264 struct thread_entry *prev;
1266 if (thread == cores[IF_COP_CORE(thread->core)].timeout)
1268 /* Next item becomes list head */
1269 cores[IF_COP_CORE(thread->core)].timeout = next;
1271 if (next != NULL)
1273 /* Fix new list head's prev to point to itself. */
1274 next->tmo.prev = next;
1277 thread->tmo.prev = NULL;
1278 return;
1281 prev = thread->tmo.prev;
1283 if (next != NULL)
1285 next->tmo.prev = prev;
1288 prev->tmo.next = next;
1289 thread->tmo.prev = NULL;
1292 /*---------------------------------------------------------------------------
1293 * Schedules a thread wakeup on the specified core. Threads will be made
1294 * ready to run when the next task switch occurs. Note that this adds no
1295 * on-core delay, since the soonest a newly woken thread could run is at
1296 * that task switch anyway. Other cores and on-core interrupts may only ever
1297 * add to the list.
1298 *---------------------------------------------------------------------------
1300 static void core_schedule_wakeup(struct thread_entry *thread)
1302 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1303 const unsigned int core = IF_COP_CORE(thread->core);
1304 add_to_list_l_locked(&cores[core].waking, thread);
1305 #if NUM_CORES > 1
1306 if (core != CURRENT_CORE)
1308 core_wake(core);
1310 #endif
1311 set_irq_level(oldlevel);
1314 /*---------------------------------------------------------------------------
1315 * If the waking list was populated, move all threads on it onto the running
1316 * list so they may be run ASAP.
1317 *---------------------------------------------------------------------------
1319 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
1321 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1322 struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
1323 struct thread_entry *r = cores[IF_COP_CORE(core)].running;
1325 /* Transfer all threads on the waking list to the running list in one
1326 swoop */
1327 if (r != NULL)
1329 /* Place waking threads at the end of the running list. */
1330 struct thread_entry *tmp;
1331 w->l.prev->l.next = r;
1332 r->l.prev->l.next = w;
1333 tmp = r->l.prev;
1334 r->l.prev = w->l.prev;
1335 w->l.prev = tmp;
1337 else
1339 /* Just transfer the list as-is */
1340 cores[IF_COP_CORE(core)].running = w;
1342 /* Just leave any timeout threads on the timeout list. If a timeout check
1343 * is due, they will be removed there. If they do a timeout again before
1344 * being removed, they will just stay on the list with a new expiration
1345 * tick. */
1347 /* Waking list is clear - NULL and unlock it */
1348 UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
1349 set_irq_level(oldlevel);
1352 /*---------------------------------------------------------------------------
1353 * Check the core's timeout list when at least one thread is due to wake.
1354 * Filtering for the condition is done before making the call. Resets the
1355 * tick at which the next check will occur.
1356 *---------------------------------------------------------------------------
1358 static void check_tmo_threads(void)
1360 const unsigned int core = CURRENT_CORE;
1361 const long tick = current_tick; /* snapshot the current tick */
1362 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1363 struct thread_entry *next = cores[core].timeout;
1365 /* If there are no processes waiting for a timeout, just keep the check
1366 tick from falling into the past. */
1367 if (next != NULL)
1369 /* Check sleeping threads. */
1370 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1374 /* Must make sure no one else is examining the state; wait until
1375 slot is no longer busy */
1376 struct thread_entry *curr = next;
1377 next = curr->tmo.next;
1379 unsigned state = GET_THREAD_STATE(curr);
1381 if (state < TIMEOUT_STATE_FIRST)
1383 /* Cleanup threads no longer on a timeout but still on the
1384 * list. */
1385 remove_from_list_tmo(curr);
1386 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1388 else if (TIME_BEFORE(tick, curr->tmo_tick))
1390 /* Timeout still pending - this will be the usual case */
1391 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1393 /* Earliest timeout found so far - move the next check up
1394 to its time */
1395 next_tmo_check = curr->tmo_tick;
1397 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1399 else
1401 /* Sleep timeout has been reached so bring the thread back to
1402 * life again. */
1403 if (state == STATE_BLOCKED_W_TMO)
1405 remove_from_list_l_locked(curr->bqp, curr);
1408 remove_from_list_tmo(curr);
1409 add_to_list_l(&cores[core].running, curr);
1410 UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
1413 /* Break the loop once we have walked through the list of all
1414 * sleeping processes or have removed them all. */
1416 while (next != NULL);
1418 set_irq_level(oldlevel);
1421 cores[core].next_tmo_check = next_tmo_check;
1424 /*---------------------------------------------------------------------------
1425 * Performs operations that must be done before blocking a thread but after
1426 * the state is saved - follows reverse of locking order. blk_ops.flags is
1427 * assumed to be nonzero.
1428 *---------------------------------------------------------------------------
1430 static inline void run_blocking_ops(
1431 IF_COP_VOID(unsigned int core, struct thread_entry *thread))
1433 #if NUM_CORES > 1
1434 struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
1435 const unsigned flags = ops->flags;
1437 if (flags == 0)
1438 return;
1440 if (flags & TBOP_SWITCH_CORE)
1442 core_switch_blk_op(core, thread);
1445 #if CONFIG_CORELOCK == SW_CORELOCK
1446 if (flags & TBOP_UNLOCK_LIST)
1448 UNLOCK_LIST(ops->list_p, NULL);
1451 if (flags & TBOP_UNLOCK_CORELOCK)
1453 corelock_unlock(ops->cl_p);
1456 if (flags & TBOP_UNLOCK_THREAD)
1458 UNLOCK_THREAD(ops->thread, 0);
1460 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1461 /* Write updated variable value into memory location */
1462 switch (flags & TBOP_VAR_TYPE_MASK)
1464 case TBOP_UNLOCK_LIST:
1465 UNLOCK_LIST(ops->list_p, ops->list_v);
1466 break;
1467 case TBOP_SET_VARi:
1468 *ops->var_ip = ops->var_iv;
1469 break;
1470 case TBOP_SET_VARu8:
1471 *ops->var_u8p = ops->var_u8v;
1472 break;
1474 #endif /* CONFIG_CORELOCK == */
1476 /* Unlock thread's slot */
1477 if (flags & TBOP_UNLOCK_CURRENT)
1479 UNLOCK_THREAD(thread, ops->state);
1482 /* Reset the IRQ level */
1483 if (flags & TBOP_IRQ_LEVEL)
1485 set_irq_level(ops->irq_level);
1488 ops->flags = 0;
1489 #else
1490 int level = cores[CURRENT_CORE].irq_level;
1491 if (level == STAY_IRQ_LEVEL)
1492 return;
1494 cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL;
1495 set_irq_level(level);
1496 #endif /* NUM_CORES */
1500 /*---------------------------------------------------------------------------
1501 * Runs any operations that may cause threads to be ready to run and then
1502 * sleeps the processor core until the next interrupt if none are.
1503 *---------------------------------------------------------------------------
1505 static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
1507 for (;;)
1509 /* We want to do these ASAP as they may change the decision to sleep
1510 * the core, or the core may have woken because an interrupt occurred
1511 * and posted a message to a queue. */
1512 if (cores[IF_COP_CORE(core)].waking.queue != NULL)
1514 core_perform_wakeup(IF_COP(core));
1517 /* If there are threads on a timeout and the earliest wakeup is due,
1518 * check the list and wake any threads that need to start running
1519 * again. */
1520 if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
1522 check_tmo_threads();
1525 /* If there is a ready to run task, return its ID and keep core
1526 * awake. */
1527 if (cores[IF_COP_CORE(core)].running != NULL)
1529 return cores[IF_COP_CORE(core)].running;
1532 /* Enter sleep mode to reduce power usage - woken up on interrupt or
1533 * wakeup request from another core. May abort if the waking list
1534 * became populated (again). See beginning of this file for the
1535 * algorithm to atomically determine this. */
1536 core_sleep(IF_COP(core, ) &cores[IF_COP_CORE(core)].waking.queue);
1540 #ifdef RB_PROFILE
1541 void profile_thread(void)
1543 profstart(cores[CURRENT_CORE].running - threads);
1545 #endif
1547 /*---------------------------------------------------------------------------
1548 * Prepares a thread to block on an object's list and/or for a specified
1549 * duration - expects object and slot to be appropriately locked if needed.
1550 *---------------------------------------------------------------------------
1552 static inline void _block_thread_on_l(struct thread_queue *list,
1553 struct thread_entry *thread,
1554 unsigned state
1555 IF_SWCL(, const bool nolock))
1557 /* If inlined, unreachable branches will be pruned with no size penalty
1558 because constant params are used for state and nolock. */
1559 const unsigned int core = IF_COP_CORE(thread->core);
1561 /* Remove the thread from the list of running threads. */
1562 remove_from_list_l(&cores[core].running, thread);
1564 /* Add a timeout to the block if not infinite */
1565 switch (state)
1567 case STATE_BLOCKED:
1568 /* Put the thread into a new list of inactive threads. */
1569 #if CONFIG_CORELOCK == SW_CORELOCK
1570 if (nolock)
1572 thread->bqp = NULL; /* Indicate nolock list */
1573 thread->bqnlp = (struct thread_entry **)list;
1574 add_to_list_l((struct thread_entry **)list, thread);
1576 else
1577 #endif
1579 thread->bqp = list;
1580 add_to_list_l_locked(list, thread);
1582 break;
1583 case STATE_BLOCKED_W_TMO:
1584 /* Put the thread into a new list of inactive threads. */
1585 #if CONFIG_CORELOCK == SW_CORELOCK
1586 if (nolock)
1588 thread->bqp = NULL; /* Indicate nolock list */
1589 thread->bqnlp = (struct thread_entry **)list;
1590 add_to_list_l((struct thread_entry **)list, thread);
1592 else
1593 #endif
1595 thread->bqp = list;
1596 add_to_list_l_locked(list, thread);
1598 /* Fall-through */
1599 case STATE_SLEEPING:
1600 /* If this thread times out sooner than any other thread, update
1601 next_tmo_check to its timeout */
1602 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1604 cores[core].next_tmo_check = thread->tmo_tick;
1607 if (thread->tmo.prev == NULL)
1609 add_to_list_tmo(thread);
1611 /* else thread was never removed from list - just keep it there */
1612 break;
1615 #ifdef HAVE_PRIORITY_SCHEDULING
1616 /* Reset priorities */
1617 if (thread->priority == cores[core].highest_priority)
1618 cores[core].highest_priority = LOWEST_PRIORITY;
1619 #endif
1621 #if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
1622 /* Safe to set state now */
1623 thread->state = state;
1624 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1625 cores[core].blk_ops.state = state;
1626 #endif
1628 #if NUM_CORES > 1
1629 /* Delay slot unlock until task switch */
1630 cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
1631 #endif
1634 static inline void block_thread_on_l(
1635 struct thread_queue *list, struct thread_entry *thread, unsigned state)
1637 _block_thread_on_l(list, thread, state IF_SWCL(, false));
1640 static inline void block_thread_on_l_no_listlock(
1641 struct thread_entry **list, struct thread_entry *thread, unsigned state)
1643 _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
1646 /*---------------------------------------------------------------------------
1647 * Switch thread in round robin fashion for any given priority. Any thread
1648 * that removed itself from the running list first must specify itself in
1649 * the parameter.
1651 * INTERNAL: Intended for use by kernel and not for programs.
1652 *---------------------------------------------------------------------------
1654 void switch_thread(struct thread_entry *old)
1656 const unsigned int core = CURRENT_CORE;
1657 struct thread_entry *thread = cores[core].running;
1659 if (old == NULL)
1661 /* Move to next thread */
1662 old = thread;
1663 cores[core].running = old->l.next;
1665 /* else running list is already at next thread */
1667 #ifdef RB_PROFILE
1668 profile_thread_stopped(old - threads);
1669 #endif
1671 /* Begin task switching by saving our current context so that we can
1672 * restore the state of the current thread later to the point prior
1673 * to this call. */
1674 store_context(&old->context);
1676 /* Check if the current thread's stack has overflowed */
1677 if(((unsigned int *)old->stack)[0] != DEADBEEF)
1678 thread_stkov(old);
1680 /* Run any blocking operations requested before switching/sleeping */
1681 run_blocking_ops(IF_COP(core, old));
1683 /* Go through the list of sleeping task to check if we need to wake up
1684 * any of them due to timeout. Also puts core into sleep state until
1685 * there is at least one running process again. */
1686 thread = sleep_core(IF_COP(core));
1688 #ifdef HAVE_PRIORITY_SCHEDULING
1689 /* Select the new task based on priorities and the last time a process
1690 * got CPU time. */
1691 for (;;)
1693 int priority = MIN(thread->priority, thread->priority_x);
1695 if (priority < cores[core].highest_priority)
1696 cores[core].highest_priority = priority;
1698 if (priority == cores[core].highest_priority ||
1699 (current_tick - thread->last_run > priority * 8))
1701 cores[core].running = thread;
1702 break;
1705 thread = thread->l.next;
1708 /* Reset the value of thread's last running time to the current time. */
1709 thread->last_run = current_tick;
1710 #endif /* HAVE_PRIORITY_SCHEDULING */
1712 /* And finally give control to the next thread. */
1713 load_context(&thread->context);
1715 #ifdef RB_PROFILE
1716 profile_thread_started(thread - threads);
1717 #endif
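/* Illustrative sketch (not from the original file): kernel-level code that
 * has nothing to do can give up the CPU each pass through a loop. Passing
 * NULL lets switch_thread() advance the running list itself; a thread that
 * already removed itself from the list passes its own entry instead.
 *
 *   while (!condition_met())        // condition_met() is hypothetical
 *       switch_thread(NULL);        // round-robin to the next ready thread
 */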
1720 /*---------------------------------------------------------------------------
1721 * Change the boost state of a thread, boosting or unboosting the CPU
1722 * as required. Requires the thread slot to be locked first.
1723 *---------------------------------------------------------------------------
1725 static inline void boost_thread(struct thread_entry *thread, bool boost)
1727 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1728 if ((thread->boosted != 0) != boost)
1730 thread->boosted = boost;
1731 cpu_boost(boost);
1733 #endif
1734 (void)thread; (void)boost;
1737 /*---------------------------------------------------------------------------
1738 * Sleeps a thread for a specified number of ticks and unboosts the thread
1739 * if it is boosted. If ticks is zero, it does not delay but instead switches
1740 * tasks.
1742 * INTERNAL: Intended for use by kernel and not for programs.
1743 *---------------------------------------------------------------------------
1745 void sleep_thread(int ticks)
1747 /* Get the entry for the current running thread. */
1748 struct thread_entry *current = cores[CURRENT_CORE].running;
1750 #if NUM_CORES > 1
1751 /* Lock thread slot */
1752 GET_THREAD_STATE(current);
1753 #endif
1755 /* Set our timeout, change lists, and finally switch threads.
1756 * Unlock during switch on multicore. */
1757 current->tmo_tick = current_tick + ticks + 1;
1758 block_thread_on_l(NULL, current, STATE_SLEEPING);
1759 switch_thread(current);
1761 /* Our status should be STATE_RUNNING */
1762 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1763 "S:R->!*R", current);
1766 /*---------------------------------------------------------------------------
1767 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1768 * Caller with interrupt-accessible lists should disable interrupts first
1769 * and request a TBOP_IRQ_LEVEL blocking operation to reset it.
1771 * INTERNAL: Intended for use by kernel objects and not for programs.
1772 *---------------------------------------------------------------------------
1774 IF_SWCL(static inline) void _block_thread(struct thread_queue *list
1775 IF_SWCL(, const bool nolock))
1777 /* Get the entry for the current running thread. */
1778 struct thread_entry *current = cores[CURRENT_CORE].running;
1780 /* Set the state to blocked and ask the scheduler to switch tasks,
1781 * this takes us off of the run queue until we are explicitly woken */
1783 #if NUM_CORES > 1
1784 /* Lock thread slot */
1785 GET_THREAD_STATE(current);
1786 #endif
1788 #if CONFIG_CORELOCK == SW_CORELOCK
1789 /* One branch optimized away during inlining */
1790 if (nolock)
1792 block_thread_on_l_no_listlock((struct thread_entry **)list,
1793 current, STATE_BLOCKED);
1795 else
1796 #endif
1798 block_thread_on_l(list, current, STATE_BLOCKED);
1801 switch_thread(current);
1803 /* Our status should be STATE_RUNNING */
1804 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1805 "B:R->!*R", current);
1808 #if CONFIG_CORELOCK == SW_CORELOCK
1809 /* Inline lock/nolock version of _block_thread into these functions */
1810 void block_thread(struct thread_queue *tq)
1812 _block_thread(tq, false);
1815 void block_thread_no_listlock(struct thread_entry **list)
1817 _block_thread((struct thread_queue *)list, true);
1819 #endif /* CONFIG_CORELOCK */
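/* Illustrative sketch (hypothetical kernel object, not from this file): a
 * minimal "flag" built directly on the blocking/wakeup primitives. The
 * caller is assumed to provide whatever serialization the locking order
 * described at the top of this file requires.
 *
 *   struct flag { struct thread_queue waiters; };
 *
 *   void flag_init(struct flag *f) { thread_queue_init(&f->waiters); }
 *   void flag_wait(struct flag *f) { block_thread(&f->waiters);      }
 *   void flag_set(struct flag *f)  { wakeup_thread(&f->waiters);     }
 */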
1821 /*---------------------------------------------------------------------------
1822 * Block a thread on a blocking queue for a specified time interval or until
1823 * explicitly woken - whichever happens first.
1824 * Caller with interrupt-accessible lists should disable interrupts first
1825 * and request that interrupt level be restored after switching out the
1826 * current thread.
1828 * INTERNAL: Intended for use by kernel objects and not for programs.
1829 *---------------------------------------------------------------------------
1831 void block_thread_w_tmo(struct thread_queue *list, int timeout)
1833 /* Get the entry for the current running thread. */
1834 struct thread_entry *current = cores[CURRENT_CORE].running;
1836 #if NUM_CORES > 1
1837 /* Lock thread slot */
1838 GET_THREAD_STATE(current);
1839 #endif
1841 /* Set the state to blocked with the specified timeout */
1842 current->tmo_tick = current_tick + timeout;
1843 /* Set the list for explicit wakeup */
1844 block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);
1846 /* Now force a task switch and block until we have been woken up
1847 * by another thread or timeout is reached - whichever happens first */
1848 switch_thread(current);
1850 /* Our status should be STATE_RUNNING */
1851 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1852 "T:R->!*R", current);
1855 /*---------------------------------------------------------------------------
1856 * Explicitly wakeup a thread on a blocking queue. Has no effect on threads
1857 * that called sleep().
1858 * Caller with interrupt-accessible lists should disable interrupts first.
1859 * This code should be considered a critical section by the caller.
1861 * INTERNAL: Intended for use by kernel objects and not for programs.
1862 *---------------------------------------------------------------------------
1864 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
1865 struct thread_queue *list IF_SWCL(, const bool nolock))
1867 struct thread_entry *t;
1868 struct thread_entry *thread;
1869 unsigned state;
1871 /* Wake up the last thread first. */
1872 #if CONFIG_CORELOCK == SW_CORELOCK
1873 /* One branch optimized away during inlining */
1874 if (nolock)
1876 t = list->queue;
1878 else
1879 #endif
1881 t = LOCK_LIST(list);
1884 /* Check if there is a blocked thread at all. */
1885 if (t == NULL)
1887 #if CONFIG_CORELOCK == SW_CORELOCK
1888 if (!nolock)
1889 #endif
1891 UNLOCK_LIST(list, NULL);
1893 return NULL;
1896 thread = t;
1898 #if NUM_CORES > 1
1899 #if CONFIG_CORELOCK == SW_CORELOCK
1900 if (nolock)
1902 /* Lock thread only, not list */
1903 state = GET_THREAD_STATE(thread);
1905 else
1906 #endif
1908 /* This locks in reverse order from other routines so a retry in the
1909 correct order may be needed */
1910 state = TRY_GET_THREAD_STATE(thread);
1911 if (state == STATE_BUSY)
1913 /* Unlock list and retry slot, then list */
1914 UNLOCK_LIST(list, t);
1915 state = GET_THREAD_STATE(thread);
1916 t = LOCK_LIST(list);
1917 /* Be sure thread still exists here - it couldn't have re-added
1918 itself if it was woken elsewhere because this function is
1919 serialized within the object that owns the list. */
1920 if (thread != t)
1922 /* Thread disappeared :( */
1923 UNLOCK_LIST(list, t);
1924 UNLOCK_THREAD(thread, state);
1925 return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
1929 #else /* NUM_CORES == 1 */
1930 state = GET_THREAD_STATE(thread);
1931 #endif /* NUM_CORES */
1933 /* Determine thread's current state. */
1934 switch (state)
1936 case STATE_BLOCKED:
1937 case STATE_BLOCKED_W_TMO:
1938 /* Remove thread from object's blocked list - select t or list depending
1939 on locking type at compile time */
1940 REMOVE_FROM_LIST_L_SELECT(t, list, thread);
1941 #if CONFIG_CORELOCK == SW_CORELOCK
1942 /* Statement optimized away during inlining if nolock != false */
1943 if (!nolock)
1944 #endif
1946 UNLOCK_LIST(list, t); /* Unlock list - removal complete */
1949 #ifdef HAVE_PRIORITY_SCHEDULING
1950 /* Give the task a kick to avoid a stall after wakeup.
1951 Not really proper treatment - TODO later. */
1952 thread->last_run = current_tick - 8*LOWEST_PRIORITY;
1953 #endif
1954 core_schedule_wakeup(thread);
1955 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
1956 return thread;
1957 default:
1958 /* Nothing to do. State is not blocked. */
1959 #if THREAD_EXTRA_CHECKS
1960 THREAD_PANICF("wakeup_thread->block invalid", thread);
1961 case STATE_RUNNING:
1962 case STATE_KILLED:
1963 #endif
1964 #if CONFIG_CORELOCK == SW_CORELOCK
1965 /* Statement optimized away during inlining if nolock != false */
1966 if (!nolock)
1967 #endif
1969 UNLOCK_LIST(list, t); /* Unlock the object's list */
1971 UNLOCK_THREAD(thread, state); /* Unlock thread slot */
1972 return NULL;
1976 #if CONFIG_CORELOCK == SW_CORELOCK
1977 /* Inline lock/nolock version of _wakeup_thread into these functions */
1978 struct thread_entry * wakeup_thread(struct thread_queue *tq)
1980 return _wakeup_thread(tq, false);
1983 struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
1985 return _wakeup_thread((struct thread_queue *)list, true);
1987 #endif /* CONFIG_CORELOCK */
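/* Illustrative sketch (assumption about caller behaviour): callers that care
 * whether anything was actually woken can test the return value.
 *
 *   struct thread_entry *woken = wakeup_thread(&obj->waiters);
 *   if (woken == NULL)
 *       ;   // nobody was blocked on the queue
 *
 * On multicore, THREAD_WAKEUP_MISSING indicates the candidate thread
 * disappeared while the list and slot locks were being reacquired. */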
1989 /*---------------------------------------------------------------------------
1990 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
1991 * will be locked on multicore.
1992 *---------------------------------------------------------------------------
1994 static int find_empty_thread_slot(void)
1996 #if NUM_CORES > 1
1997 /* Any slot could be on an IRQ-accessible list */
1998 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1999 #endif
2000 /* Thread slots are not locked on single core */
2002 int n;
2004 for (n = 0; n < MAXTHREADS; n++)
2006 /* Obtain current slot state - lock it on multicore */
2007 unsigned state = GET_THREAD_STATE(&threads[n]);
2009 if (state == STATE_KILLED
2010 #if NUM_CORES > 1
2011 && threads[n].name != THREAD_DESTRUCT
2012 #endif
2015 /* Slot is empty - leave it locked and caller will unlock */
2016 break;
2019 /* Finished examining slot - no longer busy - unlock on multicore */
2020 UNLOCK_THREAD(&threads[n], state);
2023 #if NUM_CORES > 1
2024 set_irq_level(oldlevel); /* Re-enable interrupts - this slot is
2025 not accessible to them yet */
2026 #endif
2028 return n;
2032 /*---------------------------------------------------------------------------
2033 * Place the current core in idle mode - woken up on interrupt or wake
2034 * request from another core.
2035 *---------------------------------------------------------------------------
2037 void core_idle(void)
2039 const unsigned int core = CURRENT_CORE;
2040 core_sleep(IF_COP(core,) &cores[core].waking.queue);
2043 /*---------------------------------------------------------------------------
2044 * Create a thread
2045 * If using a dual core architecture, specify which core to start the thread
2046 * on, and whether to fall back to the other core if it can't be created
2047 * Return ID if context area could be allocated, else NULL.
2048 *---------------------------------------------------------------------------
2050 struct thread_entry*
2051 create_thread(void (*function)(void), void* stack, int stack_size,
2052 unsigned flags, const char *name
2053 IF_PRIO(, int priority)
2054 IF_COP(, unsigned int core))
2056 unsigned int i;
2057 unsigned int stacklen;
2058 unsigned int *stackptr;
2059 int slot;
2060 struct thread_entry *thread;
2061 unsigned state;
2063 slot = find_empty_thread_slot();
2064 if (slot >= MAXTHREADS)
2066 return NULL;
2069 /* Munge the stack to make it easy to spot stack overflows */
2070 stacklen = stack_size / sizeof(int);
2071 stackptr = stack;
2072 for(i = 0;i < stacklen;i++)
2074 stackptr[i] = DEADBEEF;
2077 /* Store interesting information */
2078 thread = &threads[slot];
2079 thread->name = name;
2080 thread->stack = stack;
2081 thread->stack_size = stack_size;
2082 thread->bqp = NULL;
2083 #if CONFIG_CORELOCK == SW_CORELOCK
2084 thread->bqnlp = NULL;
2085 #endif
2086 thread->queue = NULL;
2087 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2088 thread->boosted = 0;
2089 #endif
2090 #ifdef HAVE_PRIORITY_SCHEDULING
2091 thread->priority_x = LOWEST_PRIORITY;
2092 thread->priority = priority;
2093 thread->last_run = current_tick - priority * 8;
2094 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2095 #endif
2097 #if NUM_CORES > 1
2098 thread->core = core;
2100 /* Writeback stack munging or anything else before starting */
2101 if (core != CURRENT_CORE)
2103 flush_icache();
2105 #endif
2107 /* Thread is not on any timeout list but be a bit paranoid */
2108 thread->tmo.prev = NULL;
2110 state = (flags & CREATE_THREAD_FROZEN) ?
2111 STATE_FROZEN : STATE_RUNNING;
2113 /* Align stack to an even 32 bit boundary */
2114 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
2116 /* Load the thread's context structure with needed startup information */
2117 THREAD_STARTUP_INIT(core, thread, function);
2119 if (state == STATE_RUNNING)
2121 #if NUM_CORES > 1
2122 if (core != CURRENT_CORE)
2124 /* Next task switch on other core moves thread to running list */
2125 core_schedule_wakeup(thread);
2127 else
2128 #endif
2130 /* Place on running list immediately */
2131 add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
2135 /* remove lock and set state */
2136 UNLOCK_THREAD_SET_STATE(thread, state);
2138 return thread;
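/* Illustrative sketch (demo_main, demo_stack and the thread name are
 * hypothetical; DEFAULT_STACK_SIZE is assumed to come from config.h):
 * creating a normal-priority thread on the CPU core and starting it
 * immediately (flags == 0, i.e. not CREATE_THREAD_FROZEN).
 *
 *   static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *   static void demo_main(void)
 *   {
 *       for (;;)
 *           sleep_thread(HZ);
 *   }
 *
 *   struct thread_entry *t =
 *       create_thread(demo_main, demo_stack, sizeof(demo_stack), 0,
 *                     "demo" IF_PRIO(, PRIORITY_USER_INTERFACE)
 *                     IF_COP(, CPU));
 *   if (t == NULL)
 *       ;   // no free thread slot
 */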
2141 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2142 void trigger_cpu_boost(void)
2144 /* No IRQ disable necessary since the current thread cannot be blocked
2145 on an IRQ-accessible list */
2146 struct thread_entry *current = cores[CURRENT_CORE].running;
2147 unsigned state;
2149 state = GET_THREAD_STATE(current);
2150 boost_thread(current, true);
2151 UNLOCK_THREAD(current, state);
2153 (void)state;
2156 void cancel_cpu_boost(void)
2158 struct thread_entry *current = cores[CURRENT_CORE].running;
2159 unsigned state;
2161 state = GET_THREAD_STATE(current);
2162 boost_thread(current, false);
2163 UNLOCK_THREAD(current, state);
2165 (void)state;
2167 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
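/* Illustrative sketch (do_heavy_work() is hypothetical): a thread doing a
 * burst of CPU-bound work brackets it with a boost request so the clock is
 * only raised while actually needed.
 *
 *   trigger_cpu_boost();
 *   do_heavy_work();
 *   cancel_cpu_boost();
 */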
2169 /*---------------------------------------------------------------------------
2170 * Remove a thread from the scheduler.
2171 * Parameter is the ID as returned from create_thread().
2173 * Use with care on threads that are not under careful control as this may
2174 * leave various objects in an undefined state. When trying to kill a thread
2175 * on another processor, be sure you know what it's doing and that it
2176 * won't be switching cores itself.
2177 *---------------------------------------------------------------------------
2179 void remove_thread(struct thread_entry *thread)
2181 #if NUM_CORES > 1
2182 /* core is not constant here because of core switching */
2183 unsigned int core = CURRENT_CORE;
2184 unsigned int old_core = NUM_CORES;
2185 #else
2186 const unsigned int core = CURRENT_CORE;
2187 #endif
2188 unsigned state;
2189 int oldlevel;
2191 if (thread == NULL)
2192 thread = cores[core].running;
2194 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2195 state = GET_THREAD_STATE(thread);
2197 if (state == STATE_KILLED)
2199 goto thread_killed;
2202 #if NUM_CORES > 1
2203 if (thread->core != core)
2205 /* Switch cores and safely extract the thread there */
2206 /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
2207 condition if the thread runs away to another processor. */
2208 unsigned int new_core = thread->core;
2209 const char *old_name = thread->name;
2211 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2212 UNLOCK_THREAD(thread, state);
2213 set_irq_level(oldlevel);
2215 old_core = switch_core(new_core);
2217 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2218 state = GET_THREAD_STATE(thread);
2220 core = new_core;
2222 if (state == STATE_KILLED)
2224 /* Thread suicided before we could kill it */
2225 goto thread_killed;
2228 /* Reopen slot - it's locked again anyway */
2229 thread->name = old_name;
2231 if (thread->core != core)
2233 /* We won't play thread tag - just forget it */
2234 UNLOCK_THREAD(thread, state);
2235 set_irq_level(oldlevel);
2236 goto thread_kill_abort;
2239 /* Perform the extraction and switch ourselves back to the original
2240 processor */
2242 #endif /* NUM_CORES > 1 */
2244 #ifdef HAVE_PRIORITY_SCHEDULING
2245 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2246 #endif
2247 if (thread->tmo.prev != NULL)
2249 /* Clean thread off the timeout list if a timeout check hasn't
2250 * run yet */
2251 remove_from_list_tmo(thread);
2254 boost_thread(thread, false);
2256 if (thread == cores[core].running)
2258 /* Suicide - thread has unconditional rights to do this */
2259 /* Maintain locks until switch-out */
2260 #if NUM_CORES > 1
2261 cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
2262 cores[core].blk_ops.irq_level = oldlevel;
2263 #else
2264 cores[core].irq_level = oldlevel;
2265 #endif
2266 block_thread_on_l(NULL, thread, STATE_KILLED);
2268 #if NUM_CORES > 1
2269 /* Switch to the idle stack if not on the main core (where "main"
2270 * runs) */
2271 if (core != CPU)
2273 switch_to_idle_stack(core);
2276 flush_icache();
2277 #endif
2278 /* Signal this thread */
2279 thread_queue_wake_no_listlock(&thread->queue);
2280 /* Switch tasks and never return */
2281 switch_thread(thread);
2282 /* This should never and must never be reached - if it is, the
2283 * state is corrupted */
2284 THREAD_PANICF("remove_thread->K:*R", thread);
2287 #if NUM_CORES > 1
2288 if (thread->name == THREAD_DESTRUCT)
2290 /* Another core is doing this operation already */
2291 UNLOCK_THREAD(thread, state);
2292 set_irq_level(oldlevel);
2293 return;
2295 #endif
2296 if (cores[core].waking.queue != NULL)
2298 /* Get any threads off the waking list and onto the running
2299 * list first - waking and running cannot be distinguished by
2300 * state */
2301 core_perform_wakeup(IF_COP(core));
2304 switch (state)
2306 case STATE_RUNNING:
2307 /* Remove thread from ready to run tasks */
2308 remove_from_list_l(&cores[core].running, thread);
2309 break;
2310 case STATE_BLOCKED:
2311 case STATE_BLOCKED_W_TMO:
2312 /* Remove thread from the queue it's blocked on - including its
2313 * own if waiting there */
2314 #if CONFIG_CORELOCK == SW_CORELOCK
2315 /* One or the other will be valid */
2316 if (thread->bqp == NULL)
2318 remove_from_list_l(thread->bqnlp, thread);
2320 else
2321 #endif /* CONFIG_CORELOCK */
2323 remove_from_list_l_locked(thread->bqp, thread);
2325 break;
2326 /* Otherwise thread is killed or is frozen and hasn't run yet */
2329 /* If thread was waiting on itself, it will have been removed above.
2330 * The wrong order would result in waking the thread first and deadlocking
2331 * since the slot is already locked. */
2332 thread_queue_wake_no_listlock(&thread->queue);
2334 thread_killed: /* Thread was already killed */
2335 /* Removal complete - safe to unlock state and reenable interrupts */
2336 UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
2337 set_irq_level(oldlevel);
2339 #if NUM_CORES > 1
2340 thread_kill_abort: /* Something stopped us from killing the thread */
2341 if (old_core < NUM_CORES)
2343 /* Did a removal on another processor's thread - switch back to
2344 native core */
2345 switch_core(old_core);
2347 #endif
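/* Illustrative sketch (worker_main and do_work are hypothetical): passing
 * NULL removes the calling thread itself, which is how a thread normally
 * terminates.
 *
 *   static void worker_main(void)
 *   {
 *       do_work();
 *       remove_thread(NULL);   // suicide - never returns
 *   }
 */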
2350 /*---------------------------------------------------------------------------
2351 * Block the current thread until another thread terminates. A thread may
2352 * wait on itself to terminate which prevents it from running again and it
2353 * will need to be killed externally.
2354 * Parameter is the ID as returned from create_thread().
2355 *---------------------------------------------------------------------------
2357 void thread_wait(struct thread_entry *thread)
2359 const unsigned int core = CURRENT_CORE;
2360 struct thread_entry *current = cores[core].running;
2361 unsigned thread_state;
2362 #if NUM_CORES > 1
2363 int oldlevel;
2364 unsigned current_state;
2365 #endif
2367 if (thread == NULL)
2368 thread = current;
2370 #if NUM_CORES > 1
2371 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2372 #endif
2374 thread_state = GET_THREAD_STATE(thread);
2376 #if NUM_CORES > 1
2377 /* We can't lock the same slot twice. The waitee will also lock itself
2378 first, then the thread slots that will be locked and woken in turn.
2379 The same order must be observed here as well. */
2380 if (thread == current)
2382 current_state = thread_state;
2384 else
2386 current_state = GET_THREAD_STATE(current);
2388 #endif
2390 if (thread_state != STATE_KILLED)
2392 #if NUM_CORES > 1
2393 cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
2394 cores[core].blk_ops.irq_level = oldlevel;
2395 #endif
2396 /* Unlock the waitee state at task switch - not done for self-wait
2397 because that would double-unlock the state and potentially
2398 corrupt another's busy assert on the slot */
2399 if (thread != current)
2401 #if CONFIG_CORELOCK == SW_CORELOCK
2402 cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
2403 cores[core].blk_ops.thread = thread;
2404 #elif CONFIG_CORELOCK == CORELOCK_SWAP
2405 cores[core].blk_ops.flags |= TBOP_SET_VARu8;
2406 cores[core].blk_ops.var_u8p = &thread->state;
2407 cores[core].blk_ops.var_u8v = thread_state;
2408 #endif
2410 block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
2411 switch_thread(current);
2412 return;
2415 /* Unlock both slots - obviously the current thread can't have
2416 STATE_KILLED so the above if clause will always catch a thread
2417 waiting on itself */
2418 #if NUM_CORES > 1
2419 UNLOCK_THREAD(current, current_state);
2420 UNLOCK_THREAD(thread, thread_state);
2421 set_irq_level(oldlevel);
2422 #endif
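/* Illustrative sketch (`worker` is a hypothetical thread created earlier):
 * blocking until the worker terminates. The worker exits by calling
 * remove_thread(NULL), which wakes everything queued on its slot.
 *
 *   thread_wait(worker);   // returns once the worker reaches STATE_KILLED
 */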
2425 #ifdef HAVE_PRIORITY_SCHEDULING
2426 /*---------------------------------------------------------------------------
2427 * Sets the thread's relative priority for the core it runs on.
2428 *---------------------------------------------------------------------------
2430 int thread_set_priority(struct thread_entry *thread, int priority)
2432 unsigned old_priority = (unsigned)-1;
2434 if (thread == NULL)
2435 thread = cores[CURRENT_CORE].running;
2437 #if NUM_CORES > 1
2438 /* Thread could be on any list and therefore on an interrupt accessible
2439 one - disable interrupts */
2440 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2441 #endif
2442 unsigned state = GET_THREAD_STATE(thread);
2444 /* Make sure it's not killed */
2445 if (state != STATE_KILLED)
2447 old_priority = thread->priority;
2448 thread->priority = priority;
2449 cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
2452 #if NUM_CORES > 1
2453 UNLOCK_THREAD(thread, state);
2454 set_irq_level(oldlevel);
2455 #endif
2456 return old_priority;
2459 /*---------------------------------------------------------------------------
2460 * Returns the current priority for a thread.
2461 *---------------------------------------------------------------------------
2463 int thread_get_priority(struct thread_entry *thread)
2465 /* Simple, quick probe. */
2466 if (thread == NULL)
2467 thread = cores[CURRENT_CORE].running;
2469 return (unsigned)thread->priority;
2472 /*---------------------------------------------------------------------------
2473 * Yield that guarantees thread execution once per round regardless of
2474 * thread's scheduler priority - basically a transient realtime boost
2475 * without altering the scheduler's thread precedence.
2477 * HACK ALERT! Search for "priority inheritance" for proper treatment.
2478 *---------------------------------------------------------------------------
2480 void priority_yield(void)
2482 const unsigned int core = CURRENT_CORE;
2483 struct thread_entry *thread = cores[core].running;
2484 thread->priority_x = HIGHEST_PRIORITY;
2485 switch_thread(NULL);
2486 thread->priority_x = LOWEST_PRIORITY;
2487 cores[core].highest_priority = LOWEST_PRIORITY;
2489 #endif /* HAVE_PRIORITY_SCHEDULING */
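/* Illustrative sketch (do_time_critical_work() is hypothetical): temporarily
 * raising the current thread's priority and restoring it afterwards. For a
 * single-round boost, priority_yield() above is the lighter alternative.
 *
 *   int old = thread_set_priority(NULL, HIGHEST_PRIORITY);  // NULL = current
 *   do_time_critical_work();
 *   thread_set_priority(NULL, old);
 */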
2491 /* Resumes a frozen thread - similar logic to wakeup_thread except that
2492 the thread is on no scheduler list at all. It exists simply by virtue of
2493 the slot having a state of STATE_FROZEN. */
2494 void thread_thaw(struct thread_entry *thread)
2496 #if NUM_CORES > 1
2497 /* Thread could be on any list and therefore on an interrupt accessible
2498 one - disable interrupts */
2499 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2500 #endif
2501 unsigned state = GET_THREAD_STATE(thread);
2503 if (state == STATE_FROZEN)
2505 const unsigned int core = CURRENT_CORE;
2506 #if NUM_CORES > 1
2507 if (thread->core != core)
2509 core_schedule_wakeup(thread);
2511 else
2512 #endif
2514 add_to_list_l(&cores[core].running, thread);
2517 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
2518 #if NUM_CORES > 1
2519 set_irq_level(oldlevel);
2520 #endif
2521 return;
2524 #if NUM_CORES > 1
2525 UNLOCK_THREAD(thread, state);
2526 set_irq_level(oldlevel);
2527 #endif
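/* Illustrative sketch (codec_main, codec_stack, the name and priority are
 * placeholders): creating a thread frozen so it cannot run until the rest
 * of initialization has finished, then thawing it.
 *
 *   struct thread_entry *t =
 *       create_thread(codec_main, codec_stack, sizeof(codec_stack),
 *                     CREATE_THREAD_FROZEN, "codec"
 *                     IF_PRIO(, PRIORITY_PLAYBACK) IF_COP(, CPU));
 *
 *   // ... finish setting up whatever the thread depends on ...
 *
 *   thread_thaw(t);   // now placed on a running list (or woken cross-core)
 */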
2530 /*---------------------------------------------------------------------------
2531 * Return the ID of the currently executing thread.
2532 *---------------------------------------------------------------------------
2534 struct thread_entry * thread_get_current(void)
2536 return cores[CURRENT_CORE].running;
2539 #if NUM_CORES > 1
2540 /*---------------------------------------------------------------------------
2541 * Switch the processor that the currently executing thread runs on.
2542 *---------------------------------------------------------------------------
2544 unsigned int switch_core(unsigned int new_core)
2546 const unsigned int core = CURRENT_CORE;
2547 struct thread_entry *current = cores[core].running;
2548 struct thread_entry *w;
2549 int oldlevel;
2551 /* Interrupts can access the lists that will be used - disable them */
2552 unsigned state = GET_THREAD_STATE(current);
2554 if (core == new_core)
2556 /* No change - just unlock everything and return same core */
2557 UNLOCK_THREAD(current, state);
2558 return core;
2561 /* Get us off the running list for the current core */
2562 remove_from_list_l(&cores[core].running, current);
2564 /* Stash return value (old core) in a safe place */
2565 current->retval = core;
2567 /* If a timeout hasn't yet been cleaned up it must be removed now or
2568 * the other core will likely attempt a removal from the wrong list! */
2569 if (current->tmo.prev != NULL)
2571 remove_from_list_tmo(current);
2574 /* Change the core number for this thread slot */
2575 current->core = new_core;
2577 /* Do not use core_schedule_wakeup here since this will result in
2578 * the thread starting to run on the other core before being finished on
2579 * this one. Delay the wakeup list unlock to keep the other core stuck
2580 * until this thread is ready. */
2581 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2582 w = LOCK_LIST(&cores[new_core].waking);
2583 ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);
2585 /* Make a callback into device-specific code, unlock the wakeup list so
2586 * that execution may resume on the new core, unlock our slot and finally
2587 * restore the interrupt level */
2588 cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
2589 TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL;
2590 cores[core].blk_ops.irq_level = oldlevel;
2591 cores[core].blk_ops.list_p = &cores[new_core].waking;
2592 #if CONFIG_CORELOCK == CORELOCK_SWAP
2593 cores[core].blk_ops.state = STATE_RUNNING;
2594 cores[core].blk_ops.list_v = w;
2595 #endif
2597 #ifdef HAVE_PRIORITY_SCHEDULING
2598 current->priority_x = HIGHEST_PRIORITY;
2599 cores[core].highest_priority = LOWEST_PRIORITY;
2600 #endif
2601 /* Do the stack switching, cache maintenance and switch_thread call -
2602 requires native code */
2603 switch_thread_core(core, current);
2605 #ifdef HAVE_PRIORITY_SCHEDULING
2606 current->priority_x = LOWEST_PRIORITY;
2607 cores[current->core].highest_priority = LOWEST_PRIORITY;
2608 #endif
2610 /* Finally return the old core to caller */
2611 return current->retval;
2612 (void)state;
2614 #endif /* NUM_CORES > 1 */
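/* Illustrative sketch (poke_cop_hardware() is hypothetical; COP is the
 * coprocessor core ID on dual-core targets): code that must briefly execute
 * on the other core hops over and back, much as remove_thread() does above.
 *
 *   unsigned int old_core = switch_core(COP);  // now running on COP
 *   poke_cop_hardware();
 *   switch_core(old_core);                     // back to the original core
 */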
2616 /*---------------------------------------------------------------------------
2617 * Initialize threading API. This assumes interrupts are not yet enabled. On
2618 * multicore setups, no core is allowed to proceed until create_thread calls
2619 * are safe to perform.
2620 *---------------------------------------------------------------------------
2622 void init_threads(void)
2624 const unsigned int core = CURRENT_CORE;
2625 struct thread_entry *thread;
2626 int slot;
2628 /* CPU will initialize first and then sleep */
2629 slot = find_empty_thread_slot();
2631 if (slot >= MAXTHREADS)
2633 /* WTF? There really must be a slot available at this stage.
2634 * This can fail if, for example, .bss isn't zeroed out by the loader
2635 * or the threads array is in the wrong section. */
2636 THREAD_PANICF("init_threads->no slot", NULL);
2639 /* Initialize initially non-zero members of core */
2640 thread_queue_init(&cores[core].waking);
2641 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2642 #if NUM_CORES == 1
2643 cores[core].irq_level = STAY_IRQ_LEVEL;
2644 #endif
2645 #ifdef HAVE_PRIORITY_SCHEDULING
2646 cores[core].highest_priority = LOWEST_PRIORITY;
2647 #endif
2649 /* Initialize initially non-zero members of slot */
2650 thread = &threads[slot];
2651 thread->name = main_thread_name;
2652 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); /* No sync worries yet */
2653 #if NUM_CORES > 1
2654 thread->core = core;
2655 #endif
2656 #ifdef HAVE_PRIORITY_SCHEDULING
2657 thread->priority = PRIORITY_USER_INTERFACE;
2658 thread->priority_x = LOWEST_PRIORITY;
2659 #endif
2660 #if CONFIG_CORELOCK == SW_CORELOCK
2661 corelock_init(&thread->cl);
2662 #endif
2664 add_to_list_l(&cores[core].running, thread);
2666 if (core == CPU)
2668 thread->stack = stackbegin;
2669 thread->stack_size = (int)stackend - (int)stackbegin;
2670 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
2671 /* TODO: HAL interface for this */
2672 /* Wake up coprocessor and let it initialize kernel and threads */
2673 #ifdef CPU_PP502x
2674 MBX_MSG_CLR = 0x3f;
2675 #endif
2676 COP_CTL = PROC_WAKE;
2677 /* Sleep until finished */
2678 CPU_CTL = PROC_SLEEP;
2679 nop; nop; nop; nop;
2681 else
2683 /* Initial stack is the COP idle stack */
2684 thread->stack = cop_idlestackbegin;
2685 thread->stack_size = IDLE_STACK_SIZE;
2686 /* Get COP safely primed inside switch_thread where it will remain
2687 * until a thread actually exists on it */
2688 CPU_CTL = PROC_WAKE;
2689 remove_thread(NULL);
2690 #endif /* NUM_CORES */
2694 /*---------------------------------------------------------------------------
2695 * Returns the maximum percentage of stack a thread ever used while running.
2696 * NOTE: Some large buffer allocations that don't use enough of the buffer
2697 * overwrite stackptr[0] will not be seen.
2698 *---------------------------------------------------------------------------
2700 int thread_stack_usage(const struct thread_entry *thread)
2702 unsigned int *stackptr = thread->stack;
2703 int stack_words = thread->stack_size / sizeof (int);
2704 int i, usage = 0;
2706 for (i = 0; i < stack_words; i++)
2708 if (stackptr[i] != DEADBEEF)
2710 usage = ((stack_words - i) * 100) / stack_words;
2711 break;
2715 return usage;
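/* Illustrative sketch (threshold and reaction are hypothetical): a debug
 * screen might flag threads that are close to exhausting their stacks.
 *
 *   if (thread_stack_usage(t) > 90)
 *       ;   // consider enlarging t's stack
 */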
2718 #if NUM_CORES > 1
2719 /*---------------------------------------------------------------------------
2720 * Returns the maximum percentage of the core's idle stack ever used during
2721 * runtime.
2722 *---------------------------------------------------------------------------
2724 int idle_stack_usage(unsigned int core)
2726 unsigned int *stackptr = idle_stacks[core];
2727 int i, usage = 0;
2729 for (i = 0; i < IDLE_STACK_WORDS; i++)
2731 if (stackptr[i] != DEADBEEF)
2733 usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
2734 break;
2738 return usage;
2740 #endif
2742 /*---------------------------------------------------------------------------
2743 * Fills in the buffer with the specified thread's name. If the name is NULL,
2744 * empty, or the thread is in destruct state, a formatted ID is written
2745 * instead.
2746 *---------------------------------------------------------------------------
2748 void thread_get_name(char *buffer, int size,
2749 struct thread_entry *thread)
2751 if (size <= 0)
2752 return;
2754 *buffer = '\0';
2756 if (thread)
2758 /* Display thread name if one or ID if none */
2759 const char *name = thread->name;
2760 const char *fmt = "%s";
2761 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
2763 name = (const char *)thread;
2764 fmt = "%08lX";
2766 snprintf(buffer, size, fmt, name);