/* firmware/thread.c */
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "sprintf.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
/****************************************************************************
 *                              ATTENTION!!                                 *
 *    See notes below on implementing processor-specific portions!         *
 ***************************************************************************/
/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif
/**
 * General locking order to guarantee progress. Order must be observed but
 * not all stages are necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would spin forever waiting for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Core Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 *
 * A minimal sketch of this locking order in use follows this comment.
 */
/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * ARM notes:
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others.
 * Usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * The core_sleep procedure to implement for any CPU must ensure that an
 * asynchronous wakeup never results in requiring a wait until the next
 * tick (up to 10000uS!). May require assembly and careful instruction
 * ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * (A C-level sketch of this sequence follows this comment.)
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
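/* A C-level sketch (for reference only, not compiled) of the five
 * core_sleep steps above. directed_to_stay_awake() and sleep_core_hw()
 * are hypothetical stand-ins for whatever a given port actually uses: */
#if 0
static inline void core_sleep_sketch(IF_COP_VOID(unsigned int core))
{
#if NUM_CORES > 1
    if (directed_to_stay_awake(core)) /* 1) another core wants us awake */
    {
        enable_irq();                 /* 4) */
        return;                       /* 5) */
    }
#endif
    /* 2) + 3): atomically reenable interrupts and sleep; if the wakeup
     * itself enables interrupts (e.g. Coldfire "stop #0x2000"), go
     * straight to 5), otherwise: */
    sleep_core_hw();
    enable_irq();                     /* 4) */
}                                     /* 5) exit */
#endif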
/* Cast to the machine pointer size, whose size could be < 4 or > 32
 * (someday :). */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
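/* For reference: DEADBEEF is the "paint" value for unused stack words, so
 * a thread's high-water stack usage can be estimated by scanning for the
 * first overwritten word. A sketch of the idea (the real reporting lives
 * in the stack-usage functions of this file): */
#if 0
static int stack_usage_percent(uintptr_t *stack, size_t size)
{
    size_t words = size / sizeof (uintptr_t);
    size_t i;

    for (i = 0; i < words; i++)
    {
        if (stack[i] != DEADBEEF) /* first word the thread ever touched */
            return ((words - i) * 100) / words;
    }

    return 0; /* stack never used */
}
#endif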
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];
static inline void core_sleep(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));

void check_tmo_threads(void)
        __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
        __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
        __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
        __attribute__((noinline));

#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
        __attribute__((always_inline));
#endif

static void thread_stkov(struct thread_entry *thread)
        __attribute__((noinline));

static inline void store_context(void* addr)
        __attribute__((always_inline));

static inline void load_context(const void* addr)
        __attribute__((always_inline));

void switch_thread(void)
        __attribute__((noinline));
/****************************************************************************
 * Processor-specific section
 */

#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
/* Support a special workaround object for large-sector disks */
#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
#else
#define IF_NO_SKIP_YIELD(...)
#endif
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked,used)) start_thread(void)
{
    /* r0 = context */
    asm volatile (
        "ldr    sp, [r0, #32]          \n" /* Load initial sp */
        "ldr    r4, [r0, #40]          \n" /* start in r4 since it's non-volatile */
        "mov    r1, #0                 \n" /* Mark thread as running */
        "str    r1, [r0, #40]          \n"
#if NUM_CORES > 1
        "ldr    r0, =invalidate_icache \n" /* Invalidate this core's cache. */
        "mov    lr, pc                 \n" /* This could be the first entry into */
        "bx     r0                     \n" /* plugin or codec code for this core. */
#endif
        "mov    lr, pc                 \n" /* Call thread function */
        "bx     r4                     \n"
    ); /* No clobber list - new thread doesn't care */
    thread_exit();
    //asm volatile (".ltorg"); /* Dump constant pool */
}
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)start_thread,       \
       (thread)->context.start = (uint32_t)function; })
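/* Usage sketch (thread creation elsewhere in this file does the real
 * setup): once a slot and stack are chosen, the macro seeds the context so
 * that the first load_context() of this thread enters start_thread, which
 * in turn calls the thread's entry function: */
#if 0
    thread->context.sp = (typeof (thread->context.sp))stackend;
    THREAD_STARTUP_INIT(core, thread, function);
#endif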
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
        "cmp     r0, #0                 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}
#if defined (CPU_PP)

#if NUM_CORES > 1
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */

#endif /* NUM_CORES */
#if CONFIG_CORELOCK == SW_CORELOCK
/* Software core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}
#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl) __attribute__((naked));
void corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
    "1:                              \n"
        "ldrb   r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "ldrb   r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp    r3, r1               \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl) __attribute__((naked));
int corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r3, r0               \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors   r0, r0, r2           \n" /* yes? lock acquired */
        "bxne   lr                   \n"
        "ldrb   r0, [r3, #2]         \n" /* || cl->turn == core? */
        "ands   r0, r0, r1           \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx     lr                   \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );
    return 0;
    (void)cl;
}
/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl) __attribute__((naked));
void corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
#else /* C versions for reference */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    for (;;)
    {
        if (cl->myl[othercore] == 0 || cl->turn == core)
            break;
    }
}

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    if (cl->myl[othercore] == 0 || cl->turn == core)
    {
        return 1;
    }

    cl->myl[core] = 0;
    return 0;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */

#endif /* CONFIG_CORELOCK == SW_CORELOCK */
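/* A minimal usage sketch (the shared counter here is hypothetical, not
 * from this source): a corelock serializes a short critical section
 * between the two PP cores, with interrupts masked first per the locking
 * order described at the top of this file: */
#if 0
static struct corelock counter_cl;          /* corelock_init()'d at startup */
static volatile int shared_counter IBSS_ATTR;

static void counter_increment(void)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&counter_cl);
    shared_counter++;
    corelock_unlock(&counter_cl);
    restore_irq(oldlevel);
}
#endif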
/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 *---------------------------------------------------------------------------
 */
#ifdef CPU_PP502x
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #4                     \n" /* r0 = 0x4 << core */
        "mov    r0, r0, lsl %[c]           \n"
        "str    r0, [%[mbx], #4]           \n" /* signal intent to sleep */
        "ldr    r1, [%[mbx], #0]           \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst    r1, r0, lsl #2             \n"
        "moveq  r1, #0x80000000            \n" /* Then sleep */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "moveq  r1, #0                     \n" /* Clear control reg */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "orr    r1, r0, r0, lsl #2         \n" /* Signal intent to wake - clear wake flag */
        "str    r1, [%[mbx], #8]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldr    r1, [%[mbx], #0]           \n"
        "tst    r1, r0, lsr #2             \n"
        "bne    1b                         \n"
        :
        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
#else /* C version for reference */
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        sleep_core(core);
        wake_core(core);
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == PP5002
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
/* PP5002 has no mailboxes - emulate using bytes */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #1                     \n" /* Signal intent to sleep */
        "strb   r0, [%[sem], #2]           \n"
        "ldrb   r0, [%[sem], #1]           \n" /* && stay_awake == 0? */
        "cmp    r0, #0                     \n"
        "bne    2f                         \n"
        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
         * that the correct alternative is executed. Don't change the order
         * of the next 4 instructions! */
        "tst    pc, #0x0c                  \n"
        "mov    r0, #0xca                  \n"
        "strne  r0, [%[ctl], %[c], lsl #2] \n"
        "streq  r0, [%[ctl], %[c], lsl #2] \n"
        "nop                               \n" /* nop's needed because of pipeline */
        "nop                               \n"
        "nop                               \n"
    "2:                                    \n"
        "mov    r0, #0                     \n" /* Clear stay_awake and sleep intent */
        "strb   r0, [%[sem], #1]           \n"
        "strb   r0, [%[sem], #2]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldrb   r0, [%[sem], #0]           \n"
        "cmp    r0, #0                     \n"
        "bne    1b                         \n"
        :
        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
          [ctl]"r"(&CPU_CTL)
        : "r0"
    );
#else /* C version for reference */
    /* Signal intent to sleep */
    core_semaphores[core].intend_sleep = 1;

    /* Something waking or other processor intends to wake us? */
    if (core_semaphores[core].stay_awake == 0)
    {
        sleep_core(core);
    }

    /* Signal wake - clear wake flag */
    core_semaphores[core].stay_awake = 0;
    core_semaphores[core].intend_sleep = 0;

    /* Wait for other processor to finish wake procedure */
    while (core_semaphores[core].intend_wake != 0);

    /* Enable IRQ */
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#endif /* PP CPU type */
/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already destined. FIQ, IRQ should be disabled before calling.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey */
}
#elif defined (CPU_PP502x)
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                    \n" /* Disable IRQ */
        "orr    r1, r3, #0x80               \n"
        "msr    cpsr_c, r1                  \n"
        "mov    r2, #0x11                   \n" /* r2 = (0x11 << othercore) */
        "mov    r2, r2, lsl %[oc]           \n" /* Signal intent to wake othercore */
        "str    r2, [%[mbx], #4]            \n"
    "1:                                     \n" /* If it intends to sleep, let it first */
        "ldr    r1, [%[mbx], #0]            \n" /* (MBX_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor    r1, r1, #0xc                \n"
        "tst    r1, r2, lsr #2              \n"
        "ldr    r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq  r1, #0x80000000             \n"
        "beq    1b                          \n" /* Wait for sleep or wake */
        "tst    r1, #0x80000000             \n" /* If sleeping, wake it */
        "movne  r1, #0x0                    \n"
        "strne  r1, [%[ctl], %[oc], lsl #2] \n"
        "mov    r1, r2, lsr #4              \n"
        "str    r1, [%[mbx], #8]            \n" /* Done with wake procedure */
        "msr    cpsr_c, r3                  \n" /* Restore IRQ */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#elif CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - emulate using bytes */
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                \n" /* Disable IRQ */
        "orr    r1, r3, #0x80           \n"
        "msr    cpsr_c, r1              \n"
        "mov    r1, #1                  \n" /* Signal intent to wake other core */
        "orr    r1, r1, r1, lsl #8      \n" /* and set stay_awake */
        "strh   r1, [%[sem], #0]        \n"
        "mov    r2, #0x8000             \n"
    "1:                                 \n" /* If it intends to sleep, let it first */
        "ldrb   r1, [%[sem], #2]        \n" /* intend_sleep != 0 ? */
        "cmp    r1, #1                  \n"
        "ldr    r1, [%[st]]             \n" /* && not sleeping ? */
        "tsteq  r1, r2, lsr %[oc]       \n"
        "beq    1b                      \n" /* Wait for sleep or wake */
        "tst    r1, r2, lsr %[oc]       \n"
        "ldrne  r2, =0xcf004054         \n" /* If sleeping, wake it */
        "movne  r1, #0xce               \n"
        "strne  r1, [r2, %[oc], lsl #2] \n"
        "mov    r1, #0                  \n" /* Done with wake procedure */
        "strb   r1, [%[sem], #0]        \n"
        "msr    cpsr_c, r3              \n" /* Restore IRQ */
        :
        : [sem]"r"(&core_semaphores[othercore]),
          [st]"r"(&PROC_STAT),
          [oc]"r"(othercore)
        : "r1", "r2", "r3"
    );
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    core_semaphores[othercore].intend_wake = 1;
    core_semaphores[othercore].stay_awake = 1;

    /* If it intends to sleep, wait until it does or aborts */
    while (core_semaphores[othercore].intend_sleep != 0 &&
           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);

    /* If sleeping, wake it up */
    if (PROC_STAT & PROC_SLEEPING(othercore))
        wake_core(othercore);

    /* Done with wake procedure */
    core_semaphores[othercore].intend_wake = 0;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#endif /* CPU type */
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}
/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place before changing the processor and after
 * having entered switch_thread since switch_thread may not do a normal return
 * because the stack being used for anything the compiler saved will not belong
 * to the thread's destination core and it may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in the
 * nonvolatile registers if it is saved there before the call since the
 * compiler's order of operations cannot be known for certain.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    flush_icache();
    /* Stash thread in r4 slot */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}
/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill-behavior regarding
 * naked functions written in C where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 */
static void __attribute__((naked))
    switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
     * Stack access also isn't permitted until restoring the original stack and
     * context. */
    asm volatile (
        "stmfd  sp!, { r4-r12, lr }    \n" /* Stack all non-volatile context on current core */
        "ldr    r2, =idle_stacks       \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr    r2, [r2, r0, lsl #2]   \n"
        "add    r2, r2, %0*4           \n"
        "stmfd  r2!, { sp }            \n" /* save original stack pointer on idle stack */
        "mov    sp, r2                 \n" /* switch stacks */
        "adr    r2, 1f                 \n" /* r2 = new core restart address */
        "str    r2, [r1, #40]          \n" /* thread->context.start = r2 */
        "ldr    pc, =switch_thread     \n" /* r0 = thread after call - see load_context */
    "1:                                \n"
        "ldr    sp, [r0, #32]          \n" /* Reload original sp from context structure */
        "mov    r1, #0                 \n" /* Clear start address */
        "str    r1, [r0, #40]          \n"
        "ldr    r0, =invalidate_icache \n" /* Invalidate new core's cache */
        "mov    lr, pc                 \n"
        "bx     r0                     \n"
        "ldmfd  sp!, { r4-r12, pc }    \n" /* Restore non-volatile context to new core and return */
        ".ltorg                        \n" /* Dump constant pool */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}
/*---------------------------------------------------------------------------
 * Do any device-specific inits for the threads and synchronize the kernel
 * initializations.
 *---------------------------------------------------------------------------
 */
static void core_thread_init(unsigned int core)
{
    if (core == CPU)
    {
        /* Wake up coprocessor and let it initialize kernel and threads */
#ifdef CPU_PP502x
        MBX_MSG_CLR = 0x3f;
#endif
        wake_core(COP);
        /* Sleep until COP has finished */
        sleep_core(CPU);
    }
    else
    {
        /* Wake the CPU and return */
        wake_core(CPU);
    }
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == S3C2440

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* FIQ also changes the CLKCON register so FIQ must be disabled
       when changing it here */
    asm volatile (
        "mrs    r0, cpsr        \n"
        "orr    r2, r0, #0x40   \n" /* Disable FIQ */
        "bic    r0, r0, #0x80   \n" /* Prepare IRQ enable */
        "msr    cpsr_c, r2      \n"
        "mov    r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
        "ldr    r2, [r1, #0xc]  \n" /* Set IDLE bit */
        "orr    r2, r2, #4      \n"
        "str    r2, [r1, #0xc]  \n"
        "msr    cpsr_c, r0      \n" /* Enable IRQ, restore FIQ */
        "mov    r2, #0          \n" /* wait for IDLE */
    "1:                         \n"
        "add    r2, r2, #1      \n"
        "cmp    r2, #10         \n"
        "bne    1b              \n"
        "orr    r2, r0, #0xc0   \n" /* Disable IRQ, FIQ */
        "msr    cpsr_c, r2      \n"
        "ldr    r2, [r1, #0xc]  \n" /* Reset IDLE bit */
        "bic    r2, r2, #4      \n"
        "str    r2, [r1, #0xc]  \n"
        "msr    cpsr_c, r0      \n" /* Enable IRQ, restore FIQ */
        : : : "r0", "r1", "r2");
}
#elif defined(CPU_TCC77X)
static inline void core_sleep(void)
{
    #warning TODO: Implement core_sleep
    enable_irq();
}
#elif defined(CPU_TCC780X)
static inline void core_sleep(void)
{
    /* Single core only for now. Use the generic ARMv5 wait for IRQ */
    asm volatile (
        "mov r0, #0                \n"
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#elif CONFIG_CPU == IMX31L
static inline void core_sleep(void)
{
    asm volatile (
        "mov r0, #0                \n"
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#elif CONFIG_CPU == DM320
static inline void core_sleep(void)
{
    asm volatile (
        "mov r0, #0                \n"
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        : : : "r0"
    );
    enable_irq();
}
#else
static inline void core_sleep(void)
{
    #warning core_sleep not implemented, battery life will be decreased
    enable_irq();
}
#endif /* CONFIG_CPU == */
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
    "start_thread:            \n" /* Start here - no naked attribute */
        "move.l %a0, %macsr   \n" /* Set initial mac status reg */
        "lea.l  48(%a1), %a1  \n"
        "move.l (%a1)+, %sp   \n" /* Set initial stack */
        "move.l (%a1), %a2    \n" /* Fetch thread function pointer */
        "clr.l  (%a1)         \n" /* Mark thread running */
        "jsr    (%a2)         \n" /* Call thread function */
    );
    thread_exit();
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run.
 */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0]  = (uint32_t)&(thread)->context,   \
       (thread)->context.d[1]  = (uint32_t)start_thread,         \
       (thread)->context.start = (uint32_t)(function); })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l  52(%0), %%d0                   \n" /* Get start address */
        "beq.b   1f                             \n" /* NULL -> already running */
        "movem.l (%0), %%a0-%%a2                \n" /* a0=macsr, a1=context, a2=start_thread */
        "jmp     (%%a2)                         \n" /* Start the thread */
    "1:                                         \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0, %%macsr                  \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* Supervisor mode, interrupts enabled upon wakeup */
    asm volatile ("stop #0x2000");
}
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* r8 = context */
    asm volatile (
    "_start_thread:            \n" /* Start here - no naked attribute */
        "mov.l  @(4, r8), r0   \n" /* Fetch thread function pointer */
        "mov.l  @(28, r8), r15 \n" /* Set initial sp */
        "mov    #0, r1         \n" /* Start the thread */
        "jsr    @r0            \n"
        "mov.l  r1, @(36, r8)  \n" /* Clear start address */
    );
    thread_exit();
}

/* Place context pointer in r8 slot, function pointer in r9 slot, and
 * start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function),         \
       (thread)->context.start = (uint32_t)start_thread; })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add    #36, %0  \n" /* Start at last reg. By the time routine */
        "sts.l  pr, @-%0 \n" /* is done, %0 will have the original value */
        "mov.l  r15,@-%0 \n"
        "mov.l  r14,@-%0 \n"
        "mov.l  r13,@-%0 \n"
        "mov.l  r12,@-%0 \n"
        "mov.l  r11,@-%0 \n"
        "mov.l  r10,@-%0 \n"
        "mov.l  r9, @-%0 \n"
        "mov.l  r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @(36, %0), r0 \n" /* Get start address */
        "tst    r0, r0        \n"
        "bt     .running      \n" /* NULL -> already running */
        "jmp    @r0           \n" /* r8 = context */
    ".running:                \n"
        "mov.l  @%0+, r8      \n" /* Executes in delay slot and outside it */
        "mov.l  @%0+, r9      \n"
        "mov.l  @%0+, r10     \n"
        "mov.l  @%0+, r11     \n"
        "mov.l  @%0+, r12     \n"
        "mov.l  @%0+, r13     \n"
        "mov.l  @%0+, r14     \n"
        "mov.l  @%0+, r15     \n"
        "lds.l  @%0+, pr      \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    asm volatile (
        "and.b  #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
        "mov    #0, r1            \n" /* Enable interrupts */
        "ldc    r1, sr            \n" /* Following instruction cannot be interrupted */
        "sleep                    \n" /* Execute standby */
        : : "z"(&SBYCR-GBR) : "r1");
}
#endif /* CONFIG_CPU == */

/*
 * End Processor-specific section
 ***************************************************************************/
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}

static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}

#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&thread->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else
#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })
#endif
/* RTR list */
#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })

#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))

#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))

#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif
/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *    +->+---+->+---+->+---+->+---+--+
 *    |                              |
 *    +------------------------------+
 *---------------------------------------------------------------------------
 */
/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}
/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == next)
    {
        /* The only item */
        *list = NULL;
        return;
    }

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    next->l.prev = prev;
    prev->l.next = next;
}
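/* Usage sketch (thread_a/thread_b and the list head are hypothetical, not
 * from this source): the "l" links form the circular list drawn above, so
 * insertion at the tail and removal from anywhere are both O(1): */
#if 0
static struct thread_entry *waiters = NULL;

    add_to_list_l(&waiters, thread_a);      /* list: A; head = A */
    add_to_list_l(&waiters, thread_b);      /* list: A <-> B; head stays A */
    remove_from_list_l(&waiters, thread_a); /* list: B; head moves to B */
#endif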
/*---------------------------------------------------------------------------
 * Timeout list structure - circular reverse (to make "remove item" O(1)),
 * NULL-terminated forward (to ease the far more common forward traversal):
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *       +---+->+---+->+---+->+---+-X
 *---------------------------------------------------------------------------
 */
/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
    THREAD_ASSERT(thread->tmo.prev == NULL,
                  "add_to_list_tmo->already listed", thread);

    thread->tmo.next = NULL;

    if (tmo == NULL)
    {
        /* Insert into unoccupied list */
        thread->tmo.prev = thread;
        cores[IF_COP_CORE(thread->core)].timeout = thread;
        return;
    }

    /* Insert last */
    thread->tmo.prev = tmo->tmo.prev;
    tmo->tmo.prev->tmo.next = thread;
    tmo->tmo.prev = thread;
}
/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is cancelled.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
    struct thread_entry *prev = thread->tmo.prev;
    struct thread_entry *next = thread->tmo.next;

    THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);

    if (next != NULL)
        next->tmo.prev = prev;

    if (thread == *list)
    {
        /* List becomes next item and empty if next == NULL */
        *list = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
    else
    {
        if (next == NULL)
            (*list)->tmo.prev = prev;
        prev->tmo.next = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
}
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Priority distribution structure (one category for each possible priority):
 *
 *       +----+----+----+ ... +-----+
 * hist: | F0 | F1 | F2 |     | F31 |
 *       +----+----+----+ ... +-----+
 * mask: | b0 | b1 | b2 |     | b31 |
 *       +----+----+----+ ... +-----+
 *
 * F = count of threads at priority category n (frequency)
 * b = bitmask of non-zero priority categories (occupancy)
 *
 *        / if F[n] != 0 : 1
 * b[n] = |
 *        \ else         : 0
 *
 *---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
 *
 * Mn = mutex n, Tn = thread n
 *
 * A lower priority thread inherits the priority of the highest priority
 * thread blocked waiting for it to complete an action (such as release a
 * mutex or respond to a message via queue_send):
 *
 * 1) T2->M1->T1
 *
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1, then T1 inherits the priority of T2.
 *
 * 2) T3
 *    \/
 *    T2->M1->T1
 *
 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
 * T1 inherits the higher priority of T2 and T3.
 *
 * 3) T3->M2->T2->M1->T1
 *
 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
 * then T1 inherits the priority of T3 through T2.
 *
 * Blocking chains can grow arbitrarily complex (though it's best that they
 * not form at all very often :) and build up from these units.
 *---------------------------------------------------------------------------
 */
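/* A short worked example of the distribution above (hypothetical values):
 * three threads at priority 16 and one at priority 8 give
 *
 *     hist[8] = 1, hist[16] = 3, mask = (1 << 8) | (1 << 16)
 *
 * so the effective (numerically lowest, i.e. highest) priority is simply
 * find_first_set_bit(mask) == 8, which is what the PIP routines below
 * rely on when recomputing a thread's priority. */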
/*---------------------------------------------------------------------------
 * Increment frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_add_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;
    /* Enough size/instruction count difference for ARM makes it worth it to
     * use different code (192 bytes for ARM). Only thing better is ASM. */
#ifdef CPU_ARM
    count = pd->hist[priority];
    if (++count == 1)
        pd->mask |= 1 << priority;
    pd->hist[priority] = count;
#else /* This one's better for Coldfire */
    if ((count = ++pd->hist[priority]) == 1)
        pd->mask |= 1 << priority;
#endif

    return count;
}
/*---------------------------------------------------------------------------
 * Decrement frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_subtract_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;

#ifdef CPU_ARM
    count = pd->hist[priority];
    if (--count == 0)
        pd->mask &= ~(1 << priority);
    pd->hist[priority] = count;
#else
    if ((count = --pd->hist[priority]) == 0)
        pd->mask &= ~(1 << priority);
#endif

    return count;
}
/*---------------------------------------------------------------------------
 * Remove from one category and add to another
 *---------------------------------------------------------------------------
 */
static inline void prio_move_entry(
    struct priority_distribution *pd, int from, int to)
{
    uint32_t mask = pd->mask;

#ifdef CPU_ARM
    unsigned int count;

    count = pd->hist[from];
    if (--count == 0)
        mask &= ~(1 << from);
    pd->hist[from] = count;

    count = pd->hist[to];
    if (++count == 1)
        mask |= 1 << to;
    pd->hist[to] = count;
#else
    if (--pd->hist[from] == 0)
        mask &= ~(1 << from);

    if (++pd->hist[to] == 1)
        mask |= 1 << to;
#endif

    pd->mask = mask;
}
/*---------------------------------------------------------------------------
 * Change the priority and rtr entry for a running thread
 *---------------------------------------------------------------------------
 */
static inline void set_running_thread_priority(
    struct thread_entry *thread, int priority)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    RTR_LOCK(core);
    rtr_move_entry(core, thread->priority, priority);
    thread->priority = priority;
    RTR_UNLOCK(core);
}
/*---------------------------------------------------------------------------
 * Finds the highest priority thread in a list of threads. If the list is
 * empty, PRIORITY_IDLE is returned.
 *
 * It is possible to use the struct priority_distribution within an object
 * instead of scanning the remaining threads in the list but as a compromise,
 * the resulting per-object memory overhead is saved at a slight speed
 * penalty under high contention.
 *---------------------------------------------------------------------------
 */
static int find_highest_priority_in_list_l(
    struct thread_entry * const thread)
{
    if (thread != NULL)
    {
        /* Go through the list until ending up back at the initial thread */
        int highest_priority = thread->priority;
        struct thread_entry *curr = thread;

        do
        {
            int priority = curr->priority;

            if (priority < highest_priority)
                highest_priority = priority;

            curr = curr->l.next;
        }
        while (curr != thread);

        return highest_priority;
    }

    return PRIORITY_IDLE;
}
/*---------------------------------------------------------------------------
 * Register priority with blocking system and bubble it down the chain if
 * any until we reach the end or something is already equal or higher.
 *
 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
 * targets but that same action also guarantees a circular block anyway and
 * those are prevented, right? :-)
 *---------------------------------------------------------------------------
 */
static struct thread_entry *
    blocker_inherit_priority(struct thread_entry *current)
{
    const int priority = current->priority;
    struct blocker *bl = current->blocker;
    struct thread_entry * const tstart = current;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since the object protection is held */
    LOCK_THREAD(bl_t);

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority >= bl_pr)
            break; /* Object priority already high enough */

        bl->priority = priority;

        /* Add this one */
        prio_add_entry(&bl_t->pdist, priority);

        if (bl_pr < PRIORITY_IDLE)
        {
            /* Not first waiter - subtract old one */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
        }

        if (priority >= bl_t->priority)
            break; /* Thread priority high enough */

        if (bl_t->state == STATE_RUNNING)
        {
            /* Blocking thread is a running thread therefore there are no
             * further blockers. Change the "run queue" on which it
             * resides. */
            set_running_thread_priority(bl_t, priority);
            break;
        }

        bl_t->priority = priority;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (next == tstart)
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(current);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (bl->thread == next)
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        current = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    return current;
}
/*---------------------------------------------------------------------------
 * Readjust priorities when waking a thread blocked waiting for another
 * in essence "releasing" the thread's effect on the object owner. Can be
 * performed from any context.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread)
{
    const int priority = thread->priority;
    struct blocker *bl = thread->blocker;
    struct thread_entry * const tstart = thread;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since object will be locked */
    LOCK_THREAD(bl_t);

    thread->blocker = NULL; /* Thread not blocked */

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority > bl_pr)
            break; /* Object priority higher */

        next = *thread->bqp;

        if (next == NULL)
        {
            /* No more threads in queue */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
            bl->priority = PRIORITY_IDLE;
        }
        else
        {
            /* Check list for highest remaining priority */
            int queue_pr = find_highest_priority_in_list_l(next);

            if (queue_pr == bl_pr)
                break; /* Object priority not changing */

            /* Change queue priority */
            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
            bl->priority = queue_pr;
        }

        if (bl_pr > bl_t->priority)
            break; /* thread priority is higher */

        bl_pr = find_first_set_bit(bl_t->pdist.mask);

        if (bl_pr == bl_t->priority)
            break; /* Thread priority not changing */

        if (bl_t->state == STATE_RUNNING)
        {
            /* No further blockers */
            set_running_thread_priority(bl_t, bl_pr);
            break;
        }

        bl_t->priority = bl_pr;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (next == tstart)
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(thread);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (bl->thread == next)
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        thread = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

#if NUM_CORES > 1
    if (thread != tstart)
    {
        /* Relock original if it changed */
        LOCK_THREAD(tstart);
    }
#endif

    return cores[CURRENT_CORE].running;
}
/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
 * blocking chains may only unblock from the very end.
 *
 * Only the owning thread itself may call this and so the assumption that
 * it is the running thread is made.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner */
    struct blocker *bl = thread->blocker;
    struct thread_entry *bl_t = bl->thread;
    struct thread_entry *next;
    int bl_pr;

    THREAD_ASSERT(thread_get_current() == bl_t,
                  "UPPT->wrong thread", thread_get_current());

    LOCK_THREAD(bl_t);

    bl_pr = bl->priority;

    /* Remove the object's boost from the owning thread */
    if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
        bl_pr <= bl_t->priority)
    {
        /* No more threads at this priority are waiting and the old level is
         * at least the thread level */
        int priority = find_first_set_bit(bl_t->pdist.mask);

        if (priority != bl_t->priority)
        {
            /* Adjust this thread's priority */
            set_running_thread_priority(bl_t, priority);
        }
    }

    next = *thread->bqp;

    if (next == NULL)
    {
        /* Expected shortcut - no more waiters */
        bl_pr = PRIORITY_IDLE;
    }
    else
    {
        if (thread->priority <= bl_pr)
        {
            /* Need to scan threads remaining in queue */
            bl_pr = find_highest_priority_in_list_l(next);
        }

        if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
            bl_pr < thread->priority)
        {
            /* Thread priority must be raised */
            thread->priority = bl_pr;
        }
    }

    bl->thread = thread;    /* This thread pwns */
    bl->priority = bl_pr;   /* Save highest blocked priority */
    thread->blocker = NULL; /* Thread not blocked */

    UNLOCK_THREAD(bl_t);

    return bl_t;
}
/*---------------------------------------------------------------------------
 * No threads must be blocked waiting for this thread except for it to exit.
 * The alternative is more elaborate cleanup and object registration code.
 * Check this for risk of silent data corruption when objects with
 * inheritable blocking are abandoned by the owner - not precise but may
 * catch something.
 *---------------------------------------------------------------------------
 */
static void check_for_obj_waiters(const char *function, struct thread_entry *thread)
{
    /* Only one bit in the mask should be set with a frequency of 1 which
     * represents the thread's own base priority */
    uint32_t mask = thread->pdist.mask;
    if ((mask & (mask - 1)) != 0 ||
        thread->pdist.hist[find_first_set_bit(mask)] > 1)
    {
        unsigned char name[32];
        thread_get_name(name, 32, thread);
        panicf("%s->%s with obj. waiters", function, name);
    }
}
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
 * Move a thread back to a running state on its core.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);

    RTR_LOCK(core);

    thread->state = STATE_RUNNING;

    add_to_list_l(&cores[core].running, thread);
    rtr_add_entry(core, thread->priority);

    RTR_UNLOCK(core);

#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}
1698 /*---------------------------------------------------------------------------
1699 * Check the core's timeout list when at least one thread is due to wake.
1700 * Filtering for the condition is done before making the call. Resets the
1701 * tick when the next check will occur.
1702 *---------------------------------------------------------------------------
1704 void check_tmo_threads(void)
1706 const unsigned int core = CURRENT_CORE;
1707 const long tick = current_tick; /* snapshot the current tick */
1708 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1709 struct thread_entry *next = cores[core].timeout;
1711 /* If there are no processes waiting for a timeout, just keep the check
1712 tick from falling into the past. */
1714 /* Break the loop once we have walked through the list of all
1715 * sleeping processes or have removed them all. */
1716 while (next != NULL)
1718 /* Check sleeping threads. Allow interrupts between checks. */
1719 enable_irq();
1721 struct thread_entry *curr = next;
1723 next = curr->tmo.next;
1725 /* Lock thread slot against explicit wakeup */
1726 disable_irq();
1727 LOCK_THREAD(curr);
1729 unsigned state = curr->state;
1731 if (state < TIMEOUT_STATE_FIRST)
1733 /* Clean up threads no longer on a timeout but still on the
1734 * list. */
1735 remove_from_list_tmo(curr);
1737 else if (TIME_BEFORE(tick, curr->tmo_tick))
1739 /* Timeout still pending - this will be the usual case */
1740 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1742 /* Earliest timeout found so far - move the next check up
1743 to its time */
1744 next_tmo_check = curr->tmo_tick;
1747 else
1749 /* Sleep timeout has been reached so bring the thread back to
1750 * life again. */
1751 if (state == STATE_BLOCKED_W_TMO)
1753 #if NUM_CORES > 1
1754 /* Lock the waiting thread's kernel object */
1755 struct corelock *ocl = curr->obj_cl;
1757 if (corelock_try_lock(ocl) == 0)
1759 /* Need to retry in the correct lock order, though the need is
1760 * unlikely */
1761 UNLOCK_THREAD(curr);
1762 corelock_lock(ocl);
1763 LOCK_THREAD(curr);
1765 if (curr->state != STATE_BLOCKED_W_TMO)
1767 /* Thread was woken or removed explicitly while slot
1768 * was unlocked */
1769 corelock_unlock(ocl);
1770 remove_from_list_tmo(curr);
1771 UNLOCK_THREAD(curr);
1772 continue;
1775 #endif /* NUM_CORES */
1777 remove_from_list_l(curr->bqp, curr);
1779 #ifdef HAVE_WAKEUP_EXT_CB
1780 if (curr->wakeup_ext_cb != NULL)
1781 curr->wakeup_ext_cb(curr);
1782 #endif
1784 #ifdef HAVE_PRIORITY_SCHEDULING
1785 if (curr->blocker != NULL)
1786 wakeup_priority_protocol_release(curr);
1787 #endif
1788 corelock_unlock(ocl);
1790 /* else state == STATE_SLEEPING */
1792 remove_from_list_tmo(curr);
1794 RTR_LOCK(core);
1796 curr->state = STATE_RUNNING;
1798 add_to_list_l(&cores[core].running, curr);
1799 rtr_add_entry(core, curr->priority);
1801 RTR_UNLOCK(core);
1804 UNLOCK_THREAD(curr);
1807 cores[core].next_tmo_check = next_tmo_check;
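/* Illustrative sketch, not part of the original file: callers filter on
 * next_tmo_check before entering, exactly as the scheduler loop in
 * switch_thread does, so the timeout list is only walked when a wakeup
 * could actually be due. */
#if 0 /* example only */
    if (!TIME_BEFORE(current_tick, cores[CURRENT_CORE].next_tmo_check))
        check_tmo_threads();
#endif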
1810 /*---------------------------------------------------------------------------
1811 * Performs operations that must be done before blocking a thread but after
1812 * the state is saved.
1813 *---------------------------------------------------------------------------
1815 #if NUM_CORES > 1
1816 static inline void run_blocking_ops(
1817 unsigned int core, struct thread_entry *thread)
1819 struct thread_blk_ops *ops = &cores[core].blk_ops;
1820 const unsigned flags = ops->flags;
1822 if (flags == TBOP_CLEAR)
1823 return;
1825 switch (flags)
1827 case TBOP_SWITCH_CORE:
1828 core_switch_blk_op(core, thread);
1829 /* Fall-through */
1830 case TBOP_UNLOCK_CORELOCK:
1831 corelock_unlock(ops->cl_p);
1832 break;
1835 ops->flags = TBOP_CLEAR;
1837 #endif /* NUM_CORES > 1 */
1839 #ifdef RB_PROFILE
1840 void profile_thread(void)
1842 profstart(cores[CURRENT_CORE].running - threads);
1844 #endif
1846 /*---------------------------------------------------------------------------
1847 * Prepares a thread to block on an object's list and/or for a specified
1848 * duration - expects object and slot to be appropriately locked if needed
1849 * and interrupts to be masked.
1850 *---------------------------------------------------------------------------
1852 static inline void block_thread_on_l(struct thread_entry *thread,
1853 unsigned state)
1855 /* If inlined, unreachable branches will be pruned with no size penalty
1856 because state is passed as a constant parameter. */
1857 const unsigned int core = IF_COP_CORE(thread->core);
1859 /* Remove the thread from the list of running threads. */
1860 RTR_LOCK(core);
1861 remove_from_list_l(&cores[core].running, thread);
1862 rtr_subtract_entry(core, thread->priority);
1863 RTR_UNLOCK(core);
1865 /* Add a timeout to the block if not infinite */
1866 switch (state)
1868 case STATE_BLOCKED:
1869 case STATE_BLOCKED_W_TMO:
1870 /* Put the thread into a new list of inactive threads. */
1871 add_to_list_l(thread->bqp, thread);
1873 if (state == STATE_BLOCKED)
1874 break;
1876 /* Fall-through */
1877 case STATE_SLEEPING:
1878 /* If this thread times out sooner than any other thread, update
1879 next_tmo_check to its timeout */
1880 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1882 cores[core].next_tmo_check = thread->tmo_tick;
1885 if (thread->tmo.prev == NULL)
1887 add_to_list_tmo(thread);
1889 /* else thread was never removed from list - just keep it there */
1890 break;
1893 /* Remember the next thread about to block. */
1894 cores[core].block_task = thread;
1896 /* Report new state. */
1897 thread->state = state;
1900 /*---------------------------------------------------------------------------
1901 * Switch thread in round robin fashion for any given priority. Any thread
1902 * that removed itself from the running list first must have set itself as
1903 * the core's block_task (see block_thread_on_l).
1905 * INTERNAL: Intended for use by kernel and not for programs.
1906 *---------------------------------------------------------------------------
1908 void switch_thread(void)
1910 const unsigned int core = CURRENT_CORE;
1911 struct thread_entry *block = cores[core].block_task;
1912 struct thread_entry *thread = cores[core].running;
1914 /* Get context to save - next thread to run is unknown until all wakeups
1915 * are evaluated */
1916 if (block != NULL)
1918 cores[core].block_task = NULL;
1920 #if NUM_CORES > 1
1921 if (thread == block)
1923 /* This was the last thread running and another core woke us before
1924 * reaching here. Force next thread selection to give tmo threads or
1925 * other threads woken before this block a first chance. */
1926 block = NULL;
1928 else
1929 #endif
1931 /* Blocking task is the old one */
1932 thread = block;
1936 #ifdef RB_PROFILE
1937 profile_thread_stopped(thread - threads);
1938 #endif
1940 /* Begin task switching by saving our current context so that we can
1941 * restore the state of the current thread later to the point prior
1942 * to this call. */
1943 store_context(&thread->context);
1945 /* Check if the current thread's stack has overflowed */
1946 if (thread->stack[0] != DEADBEEF)
1947 thread_stkov(thread);
1949 #if NUM_CORES > 1
1950 /* Run any blocking operations requested before switching/sleeping */
1951 run_blocking_ops(core, thread);
1952 #endif
1954 #ifdef HAVE_PRIORITY_SCHEDULING
1955 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1956 /* Reset the value of thread's skip count */
1957 thread->skip_count = 0;
1958 #endif
1960 for (;;)
1962 /* If there are threads on a timeout and the earliest wakeup is due,
1963 * check the list and wake any threads that need to start running
1964 * again. */
1965 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
1967 check_tmo_threads();
1970 disable_irq();
1971 RTR_LOCK(core);
1973 thread = cores[core].running;
1975 if (thread == NULL)
1977 /* Enter sleep mode to reduce power usage - woken up on interrupt
1978 * or wakeup request from another core - expected to enable
1979 * interrupts. */
1980 RTR_UNLOCK(core);
1981 core_sleep(IF_COP(core));
1983 else
1985 #ifdef HAVE_PRIORITY_SCHEDULING
1986 /* Select the new task based on priorities and the last time a
1987 * process got CPU time relative to the highest priority runnable
1988 * task. */
1989 struct priority_distribution *pd = &cores[core].rtr;
1990 int max = find_first_set_bit(pd->mask);
1992 if (block == NULL)
1994 /* Not switching on a block, tentatively select next thread */
1995 thread = thread->l.next;
1998 for (;;)
2000 int priority = thread->priority;
2001 int diff;
2003 /* This ridiculously simple method of aging seems to work
2004 * suspiciously well. It does tend to reward CPU hogs that
2005 * under-yield, which is generally not desirable, but on the plus
2006 * side it penalizes excess yielding relative to other threads,
2007 * which is good if some high priority thread is performing no
2008 * useful work, such as polling for a device to be ready. Of
2009 * course, aging is only employed when higher and lower priority
2010 * threads are runnable. The highest priority runnable
2011 * thread(s) are never skipped. */
2012 if (priority <= max ||
2013 IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
2014 (diff = priority - max, ++thread->skip_count > diff*diff))
2016 cores[core].running = thread;
2017 break;
2020 thread = thread->l.next;
2022 #else
2023 /* Without priority use a simple FCFS algorithm */
2024 if (block == NULL)
2026 /* Not switching on a block, select next thread */
2027 thread = thread->l.next;
2028 cores[core].running = thread;
2030 #endif /* HAVE_PRIORITY_SCHEDULING */
2032 RTR_UNLOCK(core);
2033 enable_irq();
2034 break;
2038 /* And finally give control to the next thread. */
2039 load_context(&thread->context);
2041 #ifdef RB_PROFILE
2042 profile_thread_started(thread - threads);
2043 #endif
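/* Illustrative worked example, not part of the original file: under the
 * aging rule above, a runnable thread of priority p is skipped until its
 * skip_count exceeds (p - max)^2, where max is the highest (numerically
 * lowest) runnable priority. */
#if 0 /* example only - hypothetical numbers */
    int p = 5, max = 3;     /* lower number means higher priority */
    int diff = p - max;     /* 2 */
    /* Skipped while ++skip_count <= diff*diff (4), so the thread is
     * selected on its fifth consideration at the latest. */
#endif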
2046 /*---------------------------------------------------------------------------
2047 * Sleeps a thread for at least the specified number of ticks, with zero
2048 * meaning a wait until the next tick.
2050 * INTERNAL: Intended for use by kernel and not for programs.
2051 *---------------------------------------------------------------------------
2053 void sleep_thread(int ticks)
2055 struct thread_entry *current = cores[CURRENT_CORE].running;
2057 LOCK_THREAD(current);
2059 /* Set our timeout, remove from run list and join timeout list. */
2060 current->tmo_tick = current_tick + ticks + 1;
2061 block_thread_on_l(current, STATE_SLEEPING);
2063 UNLOCK_THREAD(current);
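/* Illustrative sketch, not part of the original file: a kernel-side
 * sleep() wrapper (name assumed here) would mask interrupts, queue the
 * timeout and then yield - sleep_thread only marks the state; the actual
 * context switch happens in switch_thread. */
#if 0 /* example only */
void sleep(int ticks)
{
    disable_irq();       /* required by the internal API */
    sleep_thread(ticks); /* set timeout and join the core's timeout list */
    switch_thread();     /* give up the CPU until woken by the tick */
}
#endif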
2066 /*---------------------------------------------------------------------------
2067 * Indefinitely block a thread on a blocking queue for explicit wakeup.
2069 * INTERNAL: Intended for use by kernel objects and not for programs.
2070 *---------------------------------------------------------------------------
2072 void block_thread(struct thread_entry *current)
2074 /* Set the state to blocked and take us off of the run queue until we
2075 * are explicitly woken */
2076 LOCK_THREAD(current);
2078 /* Set the list for explicit wakeup */
2079 block_thread_on_l(current, STATE_BLOCKED);
2081 #ifdef HAVE_PRIORITY_SCHEDULING
2082 if (current->blocker != NULL)
2084 /* Object supports PIP */
2085 current = blocker_inherit_priority(current);
2087 #endif
2089 UNLOCK_THREAD(current);
2092 /*---------------------------------------------------------------------------
2093 * Block a thread on a blocking queue for a specified time interval or until
2094 * explicitly woken - whichever happens first.
2096 * INTERNAL: Intended for use by kernel objects and not for programs.
2097 *---------------------------------------------------------------------------
2099 void block_thread_w_tmo(struct thread_entry *current, int timeout)
2101 /* Get the entry for the current running thread. */
2102 LOCK_THREAD(current);
2104 /* Set the state to blocked with the specified timeout */
2105 current->tmo_tick = current_tick + timeout;
2107 /* Set the list for explicit wakeup */
2108 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
2110 #ifdef HAVE_PRIORITY_SCHEDULING
2111 if (current->blocker != NULL)
2113 /* Object supports PIP */
2114 current = blocker_inherit_priority(current);
2116 #endif
2118 UNLOCK_THREAD(current);
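/* Illustrative sketch, not part of the original file: a kernel object
 * implementing a timed wait would point the thread at its own queue and
 * corelock before blocking, following the same pattern thread_wait uses.
 * The object "obj" and its members are hypothetical. */
#if 0 /* example only */
    struct thread_entry *current = cores[CURRENT_CORE].running;

    corelock_lock(&obj->cl);                /* object acts as the C.S. */
    IF_COP( current->obj_cl = &obj->cl; )   /* for explicit wakeup races */
    current->bqp = &obj->queue;             /* queue to block on */

    disable_irq();
    block_thread_w_tmo(current, timeout);   /* STATE_BLOCKED_W_TMO */

    corelock_unlock(&obj->cl);
    switch_thread();                 /* returns when woken or timed out */
#endif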
2121 /*---------------------------------------------------------------------------
2122 * Explicitly wake up a thread on a blocking queue. Only affects threads
2123 * in STATE_BLOCKED and STATE_BLOCKED_W_TMO.
2125 * This code should be considered a critical section by the caller meaning
2126 * that the object's corelock should be held.
2128 * INTERNAL: Intended for use by kernel objects and not for programs.
2129 *---------------------------------------------------------------------------
2131 unsigned int wakeup_thread(struct thread_entry **list)
2133 struct thread_entry *thread = *list;
2134 unsigned int result = THREAD_NONE;
2136 /* Check if there is a blocked thread at all. */
2137 if (thread == NULL)
2138 return result;
2140 LOCK_THREAD(thread);
2142 /* Determine thread's current state. */
2143 switch (thread->state)
2145 case STATE_BLOCKED:
2146 case STATE_BLOCKED_W_TMO:
2147 remove_from_list_l(list, thread);
2149 result = THREAD_OK;
2151 #ifdef HAVE_PRIORITY_SCHEDULING
2152 struct thread_entry *current;
2153 struct blocker *bl = thread->blocker;
2155 if (bl == NULL)
2157 /* No inheritance - just boost the thread by aging */
2158 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
2159 thread->skip_count = thread->priority;
2160 current = cores[CURRENT_CORE].running;
2162 else
2164 /* Call the specified unblocking PIP */
2165 current = bl->wakeup_protocol(thread);
2168 if (current != NULL && thread->priority < current->priority
2169 IF_COP( && thread->core == current->core ))
2171 /* Woken thread is higher priority and exists on the same CPU core;
2172 * recommend a task switch. Knowing if this is an interrupt call
2173 * would be helpful here. */
2174 result |= THREAD_SWITCH;
2176 #endif /* HAVE_PRIORITY_SCHEDULING */
2178 core_schedule_wakeup(thread);
2179 break;
2181 /* Nothing to do. State is not blocked. */
2182 #if THREAD_EXTRA_CHECKS
2183 default:
2184 THREAD_PANICF("wakeup_thread->block invalid", thread);
2185 case STATE_RUNNING:
2186 case STATE_KILLED:
2187 break;
2188 #endif
2191 UNLOCK_THREAD(thread);
2192 return result;
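/* Illustrative sketch, not part of the original file: a wakeup performed
 * from thread context may honor the THREAD_SWITCH hint immediately, while
 * an interrupt handler would typically defer the switch. "obj" is
 * hypothetical; its corelock must be held as noted above. */
#if 0 /* example only */
    corelock_lock(&obj->cl);
    unsigned int result = wakeup_thread(&obj->queue);
    corelock_unlock(&obj->cl);

    if (result & THREAD_SWITCH)
        switch_thread();    /* a higher priority thread was woken */
#endif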
2195 /*---------------------------------------------------------------------------
2196 * Wake up an entire queue of threads - returns the bitwise-or of the
2197 * return bitmask from each operation, or THREAD_NONE if nothing was
2198 * awakened. Object owning the queue must be locked first.
2200 * INTERNAL: Intended for use by kernel objects and not for programs.
2201 *---------------------------------------------------------------------------
2203 unsigned int thread_queue_wake(struct thread_entry **list)
2205 unsigned result = THREAD_NONE;
2207 for (;;)
2209 unsigned int rc = wakeup_thread(list);
2211 if (rc == THREAD_NONE)
2212 break; /* No more threads */
2214 result |= rc;
2217 return result;
2220 /*---------------------------------------------------------------------------
2221 * Find an empty thread slot, returning NULL if none is found. The slot
2222 * returned will be locked on multicore.
2223 *---------------------------------------------------------------------------
2225 static struct thread_entry * find_empty_thread_slot(void)
2227 /* Any slot could be on an interrupt-accessible list */
2228 IF_COP( int oldlevel = disable_irq_save(); )
2229 struct thread_entry *thread = NULL;
2230 int n;
2232 for (n = 0; n < MAXTHREADS; n++)
2234 /* Obtain current slot state - lock it on multicore */
2235 struct thread_entry *t = &threads[n];
2236 LOCK_THREAD(t);
2238 if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
2240 /* Slot is empty - leave it locked and caller will unlock */
2241 thread = t;
2242 break;
2245 /* Finished examining slot - no longer busy - unlock on multicore */
2246 UNLOCK_THREAD(t);
2249 IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
2250 not accessible to them yet */
2251 return thread;
2255 /*---------------------------------------------------------------------------
2256 * Place the current core in idle mode - woken up on interrupt or wake
2257 * request from another core.
2258 *---------------------------------------------------------------------------
2260 void core_idle(void)
2262 IF_COP( const unsigned int core = CURRENT_CORE; )
2263 disable_irq();
2264 core_sleep(IF_COP(core));
2267 /*---------------------------------------------------------------------------
2268 * Create a thread. If using a dual core architecture, specify which core to
2269 * start the thread on.
2271 * Returns a pointer to the new thread's slot on success, else NULL.
2272 *---------------------------------------------------------------------------
2274 struct thread_entry*
2275 create_thread(void (*function)(void), void* stack, size_t stack_size,
2276 unsigned flags, const char *name
2277 IF_PRIO(, int priority)
2278 IF_COP(, unsigned int core))
2280 unsigned int i;
2281 unsigned int stack_words;
2282 uintptr_t stackptr, stackend;
2283 struct thread_entry *thread;
2284 unsigned state;
2285 int oldlevel;
2287 thread = find_empty_thread_slot();
2288 if (thread == NULL)
2290 return NULL;
2293 oldlevel = disable_irq_save();
2295 /* Munge the stack to make it easy to spot stack overflows */
2296 stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
2297 stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
2298 stack_size = stackend - stackptr;
2299 stack_words = stack_size / sizeof (uintptr_t);
2301 for (i = 0; i < stack_words; i++)
2303 ((uintptr_t *)stackptr)[i] = DEADBEEF;
2306 /* Store interesting information */
2307 thread->name = name;
2308 thread->stack = (uintptr_t *)stackptr;
2309 thread->stack_size = stack_size;
2310 thread->queue = NULL;
2311 #ifdef HAVE_WAKEUP_EXT_CB
2312 thread->wakeup_ext_cb = NULL;
2313 #endif
2314 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2315 thread->cpu_boost = 0;
2316 #endif
2317 #ifdef HAVE_PRIORITY_SCHEDULING
2318 memset(&thread->pdist, 0, sizeof(thread->pdist));
2319 thread->blocker = NULL;
2320 thread->base_priority = priority;
2321 thread->priority = priority;
2322 thread->skip_count = priority;
2323 prio_add_entry(&thread->pdist, priority);
2324 #endif
2326 #if NUM_CORES > 1
2327 thread->core = core;
2329 /* Write back the stack munging or anything else before starting */
2330 if (core != CURRENT_CORE)
2332 flush_icache();
2334 #endif
2336 /* Thread is not on any timeout list, but be a bit paranoid anyway */
2337 thread->tmo.prev = NULL;
2339 state = (flags & CREATE_THREAD_FROZEN) ?
2340 STATE_FROZEN : STATE_RUNNING;
2342 thread->context.sp = (typeof (thread->context.sp))stackend;
2344 /* Load the thread's context structure with needed startup information */
2345 THREAD_STARTUP_INIT(core, thread, function);
2347 thread->state = state;
2349 if (state == STATE_RUNNING)
2350 core_schedule_wakeup(thread);
2352 UNLOCK_THREAD(thread);
2354 restore_irq(oldlevel);
2356 return thread;
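/* Illustrative sketch, not part of the original file: typical usage with a
 * statically allocated stack. my_thread, my_stack and the chosen priority
 * are hypothetical; the IF_PRIO/IF_COP arguments exist only on builds with
 * those features enabled. */
#if 0 /* example only */
static long my_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void my_thread(void)
{
    for (;;)
        ;   /* do periodic work, then sleep or yield */
}

    struct thread_entry *t =
        create_thread(my_thread, my_stack, sizeof(my_stack),
                      0, "my thread"
                      IF_PRIO(, PRIORITY_BACKGROUND) IF_COP(, CPU));
    /* t == NULL means no free slot was available */
#endif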
2359 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2360 /*---------------------------------------------------------------------------
2361 * Change the boost state of a thread boosting or unboosting the CPU
2362 * as required.
2363 *---------------------------------------------------------------------------
2365 static inline void boost_thread(struct thread_entry *thread, bool boost)
2367 if ((thread->cpu_boost != 0) != boost)
2369 thread->cpu_boost = boost;
2370 cpu_boost(boost);
2374 void trigger_cpu_boost(void)
2376 struct thread_entry *current = cores[CURRENT_CORE].running;
2377 boost_thread(current, true);
2380 void cancel_cpu_boost(void)
2382 struct thread_entry *current = cores[CURRENT_CORE].running;
2383 boost_thread(current, false);
2385 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
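/* Illustrative sketch, not part of the original file: a thread boosts the
 * CPU around a burst of heavy work; boost_thread tracks the per-thread
 * state so repeated triggers don't stack. do_heavy_work is hypothetical. */
#if 0 /* example only */
    trigger_cpu_boost();    /* raise the CPU clock for this thread */
    do_heavy_work();
    cancel_cpu_boost();     /* drop this thread's boost when done */
#endif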
2387 /*---------------------------------------------------------------------------
2388 * Block the current thread until another thread terminates. A thread may
2389 * wait on itself to terminate, which prevents it from running again; it
2390 * will then need to be killed externally.
2391 * Parameter is the ID as returned from create_thread().
2392 *---------------------------------------------------------------------------
2394 void thread_wait(struct thread_entry *thread)
2396 struct thread_entry *current = cores[CURRENT_CORE].running;
2398 if (thread == NULL)
2399 thread = current;
2401 /* Lock thread-as-waitable-object lock */
2402 corelock_lock(&thread->waiter_cl);
2404 /* Be sure it hasn't been killed yet */
2405 if (thread->state != STATE_KILLED)
2407 IF_COP( current->obj_cl = &thread->waiter_cl; )
2408 current->bqp = &thread->queue;
2410 disable_irq();
2411 block_thread(current);
2413 corelock_unlock(&thread->waiter_cl);
2415 switch_thread();
2416 return;
2419 corelock_unlock(&thread->waiter_cl);
2422 /*---------------------------------------------------------------------------
2423 * Exit the current thread. The Right Way to Do Things (TM).
2424 *---------------------------------------------------------------------------
2426 void thread_exit(void)
2428 const unsigned int core = CURRENT_CORE;
2429 struct thread_entry *current = cores[core].running;
2431 /* Cancel CPU boost if any */
2432 cancel_cpu_boost();
2434 disable_irq();
2436 corelock_lock(&current->waiter_cl);
2437 LOCK_THREAD(current);
2439 #if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
2440 if (current->name == THREAD_DESTRUCT)
2442 /* Thread being killed - become a waiter */
2443 UNLOCK_THREAD(current);
2444 corelock_unlock(&current->waiter_cl);
2445 thread_wait(current);
2446 THREAD_PANICF("thread_exit->WK:*R", current);
2448 #endif
2450 #ifdef HAVE_PRIORITY_SCHEDULING
2451 check_for_obj_waiters("thread_exit", current);
2452 #endif
2454 if (current->tmo.prev != NULL)
2456 /* Cancel pending timeout list removal */
2457 remove_from_list_tmo(current);
2460 /* Switch tasks and never return */
2461 block_thread_on_l(current, STATE_KILLED);
2463 #if NUM_CORES > 1
2464 /* Switch to the idle stack if not on the main core (where "main"
2465 * runs) - we can hope gcc doesn't need the old stack beyond this
2466 * point. */
2467 if (core != CPU)
2469 switch_to_idle_stack(core);
2472 flush_icache();
2473 #endif
2474 current->name = NULL;
2476 /* Signal this thread */
2477 thread_queue_wake(&current->queue);
2478 corelock_unlock(&current->waiter_cl);
2479 /* Slot must be unusable until thread is really gone */
2480 UNLOCK_THREAD_AT_TASK_SWITCH(current);
2481 switch_thread();
2482 /* This should never and must never be reached - if it is, the
2483 * state is corrupted */
2484 THREAD_PANICF("thread_exit->K:*R", current);
2487 #ifdef ALLOW_REMOVE_THREAD
2488 /*---------------------------------------------------------------------------
2489 * Remove a thread from the scheduler. Not The Right Way to Do Things in
2490 * normal programs.
2492 * Parameter is the ID as returned from create_thread().
2494 * Use with care on threads that are not under careful control as this may
2495 * leave various objects in an undefined state.
2496 *---------------------------------------------------------------------------
2498 void remove_thread(struct thread_entry *thread)
2500 #if NUM_CORES > 1
2501 /* core is not constant here because of core switching */
2502 unsigned int core = CURRENT_CORE;
2503 unsigned int old_core = NUM_CORES;
2504 struct corelock *ocl = NULL;
2505 #else
2506 const unsigned int core = CURRENT_CORE;
2507 #endif
2508 struct thread_entry *current = cores[core].running;
2510 unsigned state;
2511 int oldlevel;
2513 if (thread == NULL)
2514 thread = current;
2516 if (thread == current)
2517 thread_exit(); /* Current thread - do normal exit */
2519 oldlevel = disable_irq_save();
2521 corelock_lock(&thread->waiter_cl);
2522 LOCK_THREAD(thread);
2524 state = thread->state;
2526 if (state == STATE_KILLED)
2528 goto thread_killed;
2531 #if NUM_CORES > 1
2532 if (thread->name == THREAD_DESTRUCT)
2534 /* Thread being killed - become a waiter */
2535 UNLOCK_THREAD(thread);
2536 corelock_unlock(&thread->waiter_cl);
2537 restore_irq(oldlevel);
2538 thread_wait(thread);
2539 return;
2542 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2544 #ifdef HAVE_PRIORITY_SCHEDULING
2545 check_for_obj_waiters("remove_thread", thread);
2546 #endif
2548 if (thread->core != core)
2550 /* Switch cores and safely extract the thread there */
2551 /* The slot HAS to be unlocked or a deadlock could occur, which means
2552 * other threads have to be guided into becoming thread waiters if they
2553 * attempt to remove it. */
2554 unsigned int new_core = thread->core;
2556 corelock_unlock(&thread->waiter_cl);
2558 UNLOCK_THREAD(thread);
2559 restore_irq(oldlevel);
2561 old_core = switch_core(new_core);
2563 oldlevel = disable_irq_save();
2565 corelock_lock(&thread->waiter_cl);
2566 LOCK_THREAD(thread);
2568 state = thread->state;
2569 core = new_core;
2570 /* Perform the extraction and switch ourselves back to the original
2571 processor */
2573 #endif /* NUM_CORES > 1 */
2575 if (thread->tmo.prev != NULL)
2577 /* Clean thread off the timeout list if a timeout check hasn't
2578 * run yet */
2579 remove_from_list_tmo(thread);
2582 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2583 /* Cancel CPU boost if any */
2584 boost_thread(thread, false);
2585 #endif
2587 IF_COP( retry_state: )
2589 switch (state)
2591 case STATE_RUNNING:
2592 RTR_LOCK(core);
2593 /* Remove thread from ready to run tasks */
2594 remove_from_list_l(&cores[core].running, thread);
2595 rtr_subtract_entry(core, thread->priority);
2596 RTR_UNLOCK(core);
2597 break;
2598 case STATE_BLOCKED:
2599 case STATE_BLOCKED_W_TMO:
2600 /* Remove thread from the queue it's blocked on - including its
2601 * own if waiting there */
2602 #if NUM_CORES > 1
2603 if (&thread->waiter_cl != thread->obj_cl)
2605 ocl = thread->obj_cl;
2607 if (corelock_try_lock(ocl) == 0)
2609 UNLOCK_THREAD(thread);
2610 corelock_lock(ocl);
2611 LOCK_THREAD(thread);
2613 if (thread->state != state)
2615 /* Something woke the thread */
2616 state = thread->state;
2617 corelock_unlock(ocl);
2618 goto retry_state;
2622 #endif
2623 remove_from_list_l(thread->bqp, thread);
2625 #ifdef HAVE_WAKEUP_EXT_CB
2626 if (thread->wakeup_ext_cb != NULL)
2627 thread->wakeup_ext_cb(thread);
2628 #endif
2630 #ifdef HAVE_PRIORITY_SCHEDULING
2631 if (thread->blocker != NULL)
2633 /* Remove thread's priority influence from its chain */
2634 wakeup_priority_protocol_release(thread);
2636 #endif
2638 #if NUM_CORES > 1
2639 if (ocl != NULL)
2640 corelock_unlock(ocl);
2641 #endif
2642 break;
2643 /* Otherwise thread is frozen and hasn't run yet */
2646 thread->state = STATE_KILLED;
2648 /* If thread was waiting on itself, it will have been removed above.
2649 * The wrong order would result in waking the thread first and deadlocking
2650 * since the slot is already locked. */
2651 thread_queue_wake(&thread->queue);
2653 thread->name = NULL;
2655 thread_killed: /* Thread was already killed */
2656 /* Removal complete - safe to unlock and reenable interrupts */
2657 corelock_unlock(&thread->waiter_cl);
2658 UNLOCK_THREAD(thread);
2659 restore_irq(oldlevel);
2661 #if NUM_CORES > 1
2662 if (old_core < NUM_CORES)
2664 /* Did a removal on another processor's thread - switch back to
2665 native core */
2666 switch_core(old_core);
2668 #endif
2670 #endif /* ALLOW_REMOVE_THREAD */
2672 #ifdef HAVE_PRIORITY_SCHEDULING
2673 /*---------------------------------------------------------------------------
2674 * Sets the thread's relative base priority for the core it runs on. Any
2675 * needed inheritance changes may also happen.
2676 *---------------------------------------------------------------------------
2678 int thread_set_priority(struct thread_entry *thread, int priority)
2680 int old_base_priority = -1;
2682 /* A little safety measure */
2683 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
2684 return -1;
2686 if (thread == NULL)
2687 thread = cores[CURRENT_CORE].running;
2689 /* Thread could be on any list and therefore on an interrupt accessible
2690 one - disable interrupts */
2691 int oldlevel = disable_irq_save();
2693 LOCK_THREAD(thread);
2695 /* Make sure it's not killed */
2696 if (thread->state != STATE_KILLED)
2698 int old_priority = thread->priority;
2700 old_base_priority = thread->base_priority;
2701 thread->base_priority = priority;
2703 prio_move_entry(&thread->pdist, old_base_priority, priority);
2704 priority = find_first_set_bit(thread->pdist.mask);
2706 if (old_priority == priority)
2708 /* No priority change - do nothing */
2710 else if (thread->state == STATE_RUNNING)
2712 /* This thread is running - change location on the run
2713 * queue. No transitive inheritance needed. */
2714 set_running_thread_priority(thread, priority);
2716 else
2718 thread->priority = priority;
2720 if (thread->blocker != NULL)
2722 /* Bubble new priority down the chain */
2723 struct blocker *bl = thread->blocker; /* Blocker struct */
2724 struct thread_entry *bl_t = bl->thread; /* Blocking thread */
2725 struct thread_entry * const tstart = thread; /* Initial thread */
2726 const int highest = MIN(priority, old_priority); /* Higher of new or old */
2728 for (;;)
2730 struct thread_entry *next; /* Next thread to check */
2731 int bl_pr; /* Highest blocked thread */
2732 int queue_pr; /* New highest blocked thread */
2733 #if NUM_CORES > 1
2734 /* Owner can change but thread cannot be dislodged - thread
2735 * may not be the first in the queue which allows other
2736 * threads ahead in the list to be given ownership during the
2737 * operation. If thread is next then the waker will have to
2738 * wait for us and the owner of the object will remain fixed.
2739 * If we successfully grab the owner -- which at some point
2740 * is guaranteed -- then the queue remains fixed until we
2741 * pass by. */
2742 for (;;)
2744 LOCK_THREAD(bl_t);
2746 /* Double-check the owner - retry if it changed */
2747 if (bl->thread == bl_t)
2748 break;
2750 UNLOCK_THREAD(bl_t);
2751 bl_t = bl->thread;
2753 #endif
2754 bl_pr = bl->priority;
2756 if (highest > bl_pr)
2757 break; /* Object priority won't change */
2759 /* This will include the thread being set */
2760 queue_pr = find_highest_priority_in_list_l(*thread->bqp);
2762 if (queue_pr == bl_pr)
2763 break; /* Object priority not changing */
2765 /* Update thread boost for this object */
2766 bl->priority = queue_pr;
2767 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
2768 bl_pr = find_first_set_bit(bl_t->pdist.mask);
2770 if (bl_t->priority == bl_pr)
2771 break; /* Blocking thread priority not changing */
2773 if (bl_t->state == STATE_RUNNING)
2775 /* Thread not blocked - we're done */
2776 set_running_thread_priority(bl_t, bl_pr);
2777 break;
2780 bl_t->priority = bl_pr;
2781 bl = bl_t->blocker; /* Blocking thread has a blocker? */
2783 if (bl == NULL)
2784 break; /* End of chain */
2786 next = bl->thread;
2788 if (next == tstart)
2789 break; /* Full-circle */
2791 UNLOCK_THREAD(thread);
2793 thread = bl_t;
2794 bl_t = next;
2795 } /* for (;;) */
2797 UNLOCK_THREAD(bl_t);
2802 UNLOCK_THREAD(thread);
2804 restore_irq(oldlevel);
2806 return old_base_priority;
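/* Illustrative sketch, not part of the original file: temporarily raising
 * the current thread's base priority and restoring it afterwards. The
 * return value is the previous base priority, or -1 on a bad argument;
 * do_time_critical_work is hypothetical. */
#if 0 /* example only */
    int old = thread_set_priority(NULL, HIGHEST_PRIORITY);
    do_time_critical_work();
    if (old >= 0)
        thread_set_priority(NULL, old);     /* restore */
#endif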
2809 /*---------------------------------------------------------------------------
2810 * Returns the current base priority for a thread.
2811 *---------------------------------------------------------------------------
2813 int thread_get_priority(struct thread_entry *thread)
2815 /* Simple, quick probe. */
2816 if (thread == NULL)
2817 thread = cores[CURRENT_CORE].running;
2819 return thread->base_priority;
2821 #endif /* HAVE_PRIORITY_SCHEDULING */
2823 /*---------------------------------------------------------------------------
2824 * Starts a frozen thread - similar semantics to wakeup_thread except that
2825 * the thread is on no scheduler or wakeup queue at all. It exists simply by
2826 * virtue of the slot having a state of STATE_FROZEN.
2827 *---------------------------------------------------------------------------
2829 void thread_thaw(struct thread_entry *thread)
2831 int oldlevel = disable_irq_save();
2832 LOCK_THREAD(thread);
2834 if (thread->state == STATE_FROZEN)
2835 core_schedule_wakeup(thread);
2837 UNLOCK_THREAD(thread);
2838 restore_irq(oldlevel);
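/* Illustrative sketch, not part of the original file: a thread created
 * with CREATE_THREAD_FROZEN stays in STATE_FROZEN until thawed, letting
 * the creator finish shared setup before the thread first runs. The names
 * my_thread, my_stack and setup_shared_state are hypothetical. */
#if 0 /* example only */
    struct thread_entry *t =
        create_thread(my_thread, my_stack, sizeof(my_stack),
                      CREATE_THREAD_FROZEN, "worker"
                      IF_PRIO(, PRIORITY_BACKGROUND) IF_COP(, CPU));
    setup_shared_state();   /* safe - t cannot run yet */
    thread_thaw(t);         /* now make it runnable */
#endif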
2841 /*---------------------------------------------------------------------------
2842 * Return the ID of the currently executing thread.
2843 *---------------------------------------------------------------------------
2845 struct thread_entry * thread_get_current(void)
2847 return cores[CURRENT_CORE].running;
2850 #if NUM_CORES > 1
2851 /*---------------------------------------------------------------------------
2852 * Switch the processor that the currently executing thread runs on.
2853 *---------------------------------------------------------------------------
2855 unsigned int switch_core(unsigned int new_core)
2857 const unsigned int core = CURRENT_CORE;
2858 struct thread_entry *current = cores[core].running;
2860 if (core == new_core)
2862 /* No change - just return same core */
2863 return core;
2866 int oldlevel = disable_irq_save();
2867 LOCK_THREAD(current);
2869 if (current->name == THREAD_DESTRUCT)
2871 /* Thread being killed - deactivate and let process complete */
2872 UNLOCK_THREAD(current);
2873 restore_irq(oldlevel);
2874 thread_wait(current);
2875 /* Should never be reached */
2876 THREAD_PANICF("switch_core->D:*R", current);
2879 /* Get us off the running list for the current core */
2880 RTR_LOCK(core);
2881 remove_from_list_l(&cores[core].running, current);
2882 rtr_subtract_entry(core, current->priority);
2883 RTR_UNLOCK(core);
2885 /* Stash return value (old core) in a safe place */
2886 current->retval = core;
2888 /* If a timeout hadn't yet been cleaned-up it must be removed now or
2889 * the other core will likely attempt a removal from the wrong list! */
2890 if (current->tmo.prev != NULL)
2892 remove_from_list_tmo(current);
2895 /* Change the core number for this thread slot */
2896 current->core = new_core;
2898 /* Do not use core_schedule_wakeup here since this will result in
2899 * the thread starting to run on the other core before being finished on
2900 * this one. Delay the list unlock to keep the other core stuck
2901 * until this thread is ready. */
2902 RTR_LOCK(new_core);
2904 rtr_add_entry(new_core, current->priority);
2905 add_to_list_l(&cores[new_core].running, current);
2907 /* Make a callback into device-specific code, unlock the wakeup list so
2908 * that execution may resume on the new core, unlock our slot and finally
2909 * restore the interrupt level */
2910 cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
2911 cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
2912 cores[core].block_task = current;
2914 UNLOCK_THREAD(current);
2916 /* Alert other core to activity */
2917 core_wake(new_core);
2919 /* Do the stack switching, cache maintenance and switch_thread call -
2920 requires native code */
2921 switch_thread_core(core, current);
2923 /* Finally return the old core to caller */
2924 return current->retval;
2926 #endif /* NUM_CORES > 1 */
2928 /*---------------------------------------------------------------------------
2929 * Initialize threading API. This assumes interrupts are not yet enabled. On
2930 * multicore setups, no core is allowed to proceed until create_thread calls
2931 * are safe to perform.
2932 *---------------------------------------------------------------------------
2934 void init_threads(void)
2936 const unsigned int core = CURRENT_CORE;
2937 struct thread_entry *thread;
2939 /* CPU will initialize first and then sleep */
2940 thread = find_empty_thread_slot();
2942 if (thread == NULL)
2944 /* WTF? There really must be a slot available at this stage.
2945 * This can fail if, for example, .bss isn't zeroed out by the loader
2946 * or the threads array is in the wrong section. */
2947 THREAD_PANICF("init_threads->no slot", NULL);
2950 /* Initialize initially non-zero members of core */
2951 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2953 /* Initialize initially non-zero members of slot */
2954 UNLOCK_THREAD(thread); /* No sync worries yet */
2955 thread->name = main_thread_name;
2956 thread->state = STATE_RUNNING;
2957 IF_COP( thread->core = core; )
2958 #ifdef HAVE_PRIORITY_SCHEDULING
2959 corelock_init(&cores[core].rtr_cl);
2960 thread->base_priority = PRIORITY_USER_INTERFACE;
2961 prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
2962 thread->priority = PRIORITY_USER_INTERFACE;
2963 rtr_add_entry(core, PRIORITY_USER_INTERFACE);
2964 #endif
2965 corelock_init(&thread->waiter_cl);
2966 corelock_init(&thread->slot_cl);
2968 add_to_list_l(&cores[core].running, thread);
2970 if (core == CPU)
2972 thread->stack = stackbegin;
2973 thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
2974 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
2975 /* Wait for other processors to finish their inits since create_thread
2976 * isn't safe to call until the kernel inits are done. The first
2977 * threads created in the system must of course be created by CPU. */
2978 core_thread_init(CPU);
2980 else
2982 /* Initial stack is the idle stack */
2983 thread->stack = idle_stacks[core];
2984 thread->stack_size = IDLE_STACK_SIZE;
2985 /* After last processor completes, it should signal all others to
2986 * proceed or may signal the next and call thread_exit(). The last one
2987 * to finish will signal CPU. */
2988 core_thread_init(core);
2989 /* Other cores do not have a main thread - go idle inside switch_thread
2990 * until a thread can run on the core. */
2991 thread_exit();
2992 #endif /* NUM_CORES */
2996 /* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
2997 #if NUM_CORES == 1
2998 static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
2999 #else
3000 static int stack_usage(uintptr_t *stackptr, size_t stack_size)
3001 #endif
3003 unsigned int stack_words = stack_size / sizeof (uintptr_t);
3004 unsigned int i;
3005 int usage = 0;
3007 for (i = 0; i < stack_words; i++)
3009 if (stackptr[i] != DEADBEEF)
3011 usage = ((stack_words - i) * 100) / stack_words;
3012 break;
3016 return usage;
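/* Illustrative worked example, not part of the original file: for a
 * 1024-word stack whose first non-DEADBEEF word is at index 768, usage =
 * (1024 - 768) * 100 / 1024 = 25, i.e. a 25% high-water mark. */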
3019 /*---------------------------------------------------------------------------
3020 * Returns the maximum percentage of stack a thread ever used while running.
3021 * NOTE: Some large buffer allocations that don't use enough of the buffer
3022 * to overwrite stackptr[0] will not be seen.
3023 *---------------------------------------------------------------------------
3025 int thread_stack_usage(const struct thread_entry *thread)
3027 return stack_usage(thread->stack, thread->stack_size);
3030 #if NUM_CORES > 1
3031 /*---------------------------------------------------------------------------
3032 * Returns the maximum percentage of the core's idle stack ever used during
3033 * runtime.
3034 *---------------------------------------------------------------------------
3036 int idle_stack_usage(unsigned int core)
3038 return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
3040 #endif
3042 /*---------------------------------------------------------------------------
3043 * Fills in the buffer with the specified thread's name. If the name is NULL,
3044 * empty, or the thread is in destruct state, a formatted ID is written
3045 * instead.
3046 *---------------------------------------------------------------------------
3048 void thread_get_name(char *buffer, int size,
3049 struct thread_entry *thread)
3051 if (size <= 0)
3052 return;
3054 *buffer = '\0';
3056 if (thread)
3058 /* Display thread name if one or ID if none */
3059 const char *name = thread->name;
3060 const char *fmt = "%s";
3061 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
3063 name = (const char *)thread;
3064 fmt = "%08lX";
3066 snprintf(buffer, size, fmt, name);