/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/****************************************************************************
 *    See notes below on implementing processor-specific portions!         *
 ***************************************************************************/
/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif
/**
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever waiting for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 *
 * A sketch of taking these four stages in order follows this comment.
 */
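/* Illustrative sketch only (not part of the build): one way a blocking
 * operation might take the four stages above in order. The queue object
 * "q" and its corelock field are hypothetical names.
 *
 *     disable_irq();              // 1) IRQ
 *     corelock_lock(&q->cl);      // 2) kernel object
 *     LOCK_THREAD(thread);        // 3) thread slot
 *     RTR_LOCK(core);             // 4) lists (run queue)
 *     ...
 *     RTR_UNLOCK(core);
 *     UNLOCK_THREAD(thread);
 *     corelock_unlock(&q->cl);
 *     enable_irq();
 */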
/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * ARM notes:
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others.
 * Usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * A pseudo-C sketch of these steps follows this comment block.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
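/* Illustrative sketch only (not part of the build): the numbered steps
 * above expressed as pseudo-C. "directed_to_stay_awake" and
 * "cpu_sleep_and_maybe_enable_irq" are hypothetical stand-ins for the
 * target-specific operations.
 *
 *     static inline void core_sleep(unsigned int core)
 *     {
 *         if (!directed_to_stay_awake(core))     // step 1
 *             cpu_sleep_and_maybe_enable_irq();  // steps 2-3
 *         enable_irq();                          // step 4 (if not done by wakeup)
 *     }                                          // step 5
 */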
/* Cast to the machine pointer size, whose size could be < 4 or > 32 bits */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];
static inline void core_sleep(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));

void check_tmo_threads(void)
        __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
        __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
        __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
        __attribute__((noinline));

static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
        __attribute__((always_inline));

static void thread_stkov(struct thread_entry *thread)
        __attribute__((noinline));

static inline void store_context(void* addr)
        __attribute__((always_inline));

static inline void load_context(const void* addr)
        __attribute__((always_inline));

void switch_thread(void)
        __attribute__((noinline));
/****************************************************************************
 * Processor-specific section
 ***************************************************************************/

#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked,used)) start_thread(void)
{
    /* r0 = context */
    asm volatile (
    "ldr    sp, [r0, #32]          \n" /* Load initial sp */
    "ldr    r4, [r0, #40]          \n" /* start in r4 since it's non-volatile */
    "mov    r1, #0                 \n" /* Mark thread as running */
    "str    r1, [r0, #40]          \n"
#if NUM_CORES > 1
    "ldr    r0, =invalidate_icache \n" /* Invalidate this core's cache. */
    "mov    lr, pc                 \n" /* This could be the first entry into */
    "bx     r0                     \n" /* plugin or codec code for this core. */
#endif
    "mov    lr, pc                 \n" /* Call thread function */
    "bx     r4                     \n"
    ); /* No clobber list - new thread doesn't care */
    thread_exit();
    //asm volatile (".ltorg"); /* Dump constant pool */
}
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)start_thread, \
       (thread)->context.start = (uint32_t)function; })
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
        "cmp     r0, #0                 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}
#if defined (CPU_PP)

#if NUM_CORES > 1
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};
#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */
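/* Handshake sketch (illustrative note): these bytes mirror the PP502x
 * mailbox protocol used by core_sleep/core_wake below. A sleeper sets
 * intend_sleep and only sleeps if stay_awake is clear; a waker sets
 * intend_wake and stay_awake, waits for the sleeper to commit either way,
 * wakes it if it slept, then clears intend_wake so the sleeper can finish
 * its wake procedure. */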
#endif /* NUM_CORES */
#if CONFIG_CORELOCK == SW_CORELOCK
/* Software core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}
#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl) __attribute__((naked));
void corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
    "1:                              \n"
        "ldrb   r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "ldrb   r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp    r3, r1               \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl) __attribute__((naked));
int corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r3, r0               \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors   r0, r0, r2           \n" /* yes? lock acquired */
        "bxne   lr                   \n"
        "ldrb   r0, [r3, #2]         \n" /* || cl->turn == core? */
        "ands   r0, r0, r1           \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx     lr                   \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );
    return 0;
    (void)cl;
}
/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl) __attribute__((naked));
void corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
#else /* C versions for reference */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    for (;;)
    {
        if (cl->myl[othercore] == 0 || cl->turn == core)
            break;
    }
}

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    if (cl->myl[othercore] == 0 || cl->turn == core)
    {
        return 1;
    }

    cl->myl[core] = 0;
    return 0;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */

#endif /* CONFIG_CORELOCK == SW_CORELOCK */
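/* Usage sketch (illustrative, not part of the build): a corelock serializes
 * CPU/COP access to shared data. The variable names here are hypothetical.
 *
 *     static struct corelock counter_cl;   // corelock_init()'d at startup
 *     static int shared_counter;
 *
 *     void counter_increment(void)
 *     {
 *         corelock_lock(&counter_cl);      // spins via Peterson's algorithm
 *         shared_counter++;
 *         corelock_unlock(&counter_cl);
 *     }
 */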
/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 *---------------------------------------------------------------------------
 */
#if defined (CPU_PP502x)
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #4                     \n" /* r0 = 0x4 << core */
        "mov    r0, r0, lsl %[c]           \n"
        "str    r0, [%[mbx], #4]           \n" /* signal intent to sleep */
        "ldr    r1, [%[mbx], #0]           \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst    r1, r0, lsl #2             \n"
        "moveq  r1, #0x80000000            \n" /* Then sleep */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "moveq  r1, #0                     \n" /* Clear control reg */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "orr    r1, r0, r0, lsl #2         \n" /* Signal intent to wake - clear wake flag */
        "str    r1, [%[mbx], #8]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldr    r1, [%[mbx], #0]           \n"
        "tst    r1, r0, lsr #2             \n"
        "bne    1b                         \n"
        :
        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
#else /* C version for reference */
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        sleep_core(core);
        wake_core(core);
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == PP5002
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
/* PP5002 has no mailboxes - emulate using bytes */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #1                     \n" /* Signal intent to sleep */
        "strb   r0, [%[sem], #2]           \n"
        "ldrb   r0, [%[sem], #1]           \n" /* && stay_awake == 0? */
        "cmp    r0, #0                     \n"
        "bne    2f                         \n"
        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
         * that the correct alternative is executed. Don't change the order
         * of the next 4 instructions! */
        "tst    pc, #0x0c                  \n"
        "mov    r0, #0xca                  \n"
        "strne  r0, [%[ctl], %[c], lsl #2] \n"
        "streq  r0, [%[ctl], %[c], lsl #2] \n"
        "nop                               \n" /* nop's needed because of pipeline */
        "nop                               \n"
        "nop                               \n"
    "2:                                    \n"
        "mov    r0, #0                     \n" /* Clear stay_awake and sleep intent */
        "strb   r0, [%[sem], #1]           \n"
        "strb   r0, [%[sem], #2]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldrb   r0, [%[sem], #0]           \n"
        "cmp    r0, #0                     \n"
        "bne    1b                         \n"
        :
        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
          [ctl]"r"(&PROC_CTL(CPU))
        : "r0");
#else /* C version for reference */
    /* Signal intent to sleep */
    core_semaphores[core].intend_sleep = 1;

    /* Something waking or other processor intends to wake us? */
    if (core_semaphores[core].stay_awake == 0)
    {
        sleep_core(core);
    }

    /* Signal wake - clear wake flag */
    core_semaphores[core].stay_awake = 0;
    core_semaphores[core].intend_sleep = 0;

    /* Wait for other processor to finish wake procedure */
    while (core_semaphores[core].intend_wake != 0);
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#endif /* PP CPU type */
/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already destined. FIQ, IRQ should be disabled before calling.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey */
}
#elif defined (CPU_PP502x)
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                    \n" /* Disable IRQ */
        "orr    r1, r3, #0x80               \n"
        "msr    cpsr_c, r1                  \n"
        "mov    r2, #0x11                   \n" /* r2 = (0x11 << othercore) */
        "mov    r2, r2, lsl %[oc]           \n" /* Signal intent to wake othercore */
        "str    r2, [%[mbx], #4]            \n"
    "1:                                     \n" /* If it intends to sleep, let it first */
        "ldr    r1, [%[mbx], #0]            \n" /* (MBX_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor    r1, r1, #0xc                \n"
        "tst    r1, r2, lsr #2              \n"
        "ldr    r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq  r1, #0x80000000             \n"
        "beq    1b                          \n" /* Wait for sleep or wake */
        "tst    r1, #0x80000000             \n" /* If sleeping, wake it */
        "movne  r1, #0                      \n"
        "strne  r1, [%[ctl], %[oc], lsl #2] \n"
        "mov    r1, r2, lsr #4              \n"
        "str    r1, [%[mbx], #8]            \n" /* Done with wake procedure */
        "msr    cpsr_c, r3                  \n" /* Restore IRQ */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#elif CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - emulate using bytes */
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                \n" /* Disable IRQ */
        "orr    r1, r3, #0x80           \n"
        "msr    cpsr_c, r1              \n"
        "mov    r1, #1                  \n" /* Signal intent to wake other core */
        "orr    r1, r1, r1, lsl #8      \n" /* and set stay_awake */
        "strh   r1, [%[sem], #0]        \n"
        "mov    r2, #0x8000             \n"
    "1:                                 \n" /* If it intends to sleep, let it first */
        "ldrb   r1, [%[sem], #2]        \n" /* intend_sleep != 0 ? */
        "cmp    r1, #1                  \n"
        "ldr    r1, [%[st]]             \n" /* && not sleeping ? */
        "tsteq  r1, r2, lsr %[oc]       \n"
        "beq    1b                      \n" /* Wait for sleep or wake */
        "tst    r1, r2, lsr %[oc]       \n"
        "ldrne  r2, =0xcf004054         \n" /* If sleeping, wake it */
        "movne  r1, #0xce               \n"
        "strne  r1, [r2, %[oc], lsl #2] \n"
        "mov    r1, #0                  \n" /* Done with wake procedure */
        "strb   r1, [%[sem], #0]        \n"
        "msr    cpsr_c, r3              \n" /* Restore IRQ */
        :
        : [sem]"r"(&core_semaphores[othercore]),
          [st]"r"(&PROC_STAT), [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    core_semaphores[othercore].intend_wake = 1;
    core_semaphores[othercore].stay_awake = 1;

    /* If it intends to sleep, wait until it does or aborts */
    while (core_semaphores[othercore].intend_sleep != 0 &&
           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);

    /* If sleeping, wake it up */
    if (PROC_STAT & PROC_SLEEPING(othercore))
        wake_core(othercore);

    /* Done with wake procedure */
    core_semaphores[othercore].intend_wake = 0;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#endif /* CPU type */
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}
/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place before changing the processor and after
 * having entered switch_thread since switch_thread may not do a normal return
 * because the stack being used for anything the compiler saved will not belong
 * to the thread's destination core and it may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in the
 * non-volatile registers if it is saved there before the call since the
 * compiler's order of operations cannot be known for certain.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    flush_icache();
    /* Stash thread in r4 slot */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}
/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill-behavior regarding
 * naked functions written in C where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked))
    switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
     * Stack access also isn't permitted until restoring the original stack and
     * context. */
    asm volatile (
        "stmfd  sp!, { r4-r12, lr }    \n" /* Stack all non-volatile context on current core */
        "ldr    r2, =idle_stacks       \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr    r2, [r2, r0, lsl #2]   \n"
        "add    r2, r2, %0*4           \n"
        "stmfd  r2!, { sp }            \n" /* save original stack pointer on idle stack */
        "mov    sp, r2                 \n" /* switch stacks */
        "adr    r2, 1f                 \n" /* r2 = new core restart address */
        "str    r2, [r1, #40]          \n" /* thread->context.start = r2 */
        "ldr    pc, =switch_thread     \n" /* r0 = thread after call - see load_context */
    "1:                                \n"
        "ldr    sp, [r0, #32]          \n" /* Reload original sp from context structure */
        "mov    r1, #0                 \n" /* Clear start address */
        "str    r1, [r0, #40]          \n"
        "ldr    r0, =invalidate_icache \n" /* Invalidate new core's cache */
        "mov    lr, pc                 \n"
        "bx     r0                     \n"
        "ldmfd  sp!, { r4-r12, pc }    \n" /* Restore non-volatile context to new core and return */
        ".ltorg                        \n" /* Dump constant pool */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}
/*---------------------------------------------------------------------------
 * Do any device-specific inits for the threads and synchronize the kernel
 * initializations.
 *---------------------------------------------------------------------------
 */
static void core_thread_init(unsigned int core)
{
    if (core == CPU)
    {
        /* Wake up coprocessor and let it initialize kernel and threads */
#ifdef CPU_PP502x
        MBX_MSG_CLR = 0x3f;
#endif
        wake_core(COP);
        /* Sleep until COP has finished */
        sleep_core(CPU);
    }
    else
    {
        /* Wake the CPU and return */
        wake_core(CPU);
    }
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == S3C2440

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* FIQ also changes the CLKCON register so FIQ must be disabled
       when changing it here */
    asm volatile (
        "mrs    r0, cpsr        \n"
        "orr    r2, r0, #0x40   \n" /* Disable FIQ */
        "bic    r0, r0, #0x80   \n" /* Prepare IRQ enable */
        "msr    cpsr_c, r2      \n" /* Disable FIQ */
        "mov    r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
        "ldr    r2, [r1, #0xc]  \n" /* Set IDLE bit */
        "orr    r2, r2, #4      \n"
        "str    r2, [r1, #0xc]  \n"
        "msr    cpsr_c, r0      \n" /* Enable IRQ, restore FIQ */
        "mov    r2, #0          \n" /* wait for IDLE */
    "1:                         \n"
        "add    r2, r2, #1      \n"
        "cmp    r2, #10         \n"
        "bne    1b              \n"
        "orr    r2, r0, #0xc0   \n" /* Disable IRQ, FIQ */
        "msr    cpsr_c, r2      \n"
        "ldr    r2, [r1, #0xc]  \n" /* Reset IDLE bit */
        "bic    r2, r2, #4      \n"
        "str    r2, [r1, #0xc]  \n"
        "msr    cpsr_c, r0      \n" /* Enable IRQ, restore FIQ */
        : : : "r0", "r1", "r2");
}
#elif defined(CPU_TCC77X)
static inline void core_sleep(void)
{
    #warning TODO: Implement core_sleep
    enable_irq();
}
#elif defined(CPU_TCC780X)
static inline void core_sleep(void)
{
    /* Single core only for now. Use the generic ARMv5 wait for IRQ */
    asm volatile (
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
    );
    enable_irq();
}
#elif CONFIG_CPU == IMX31L
static inline void core_sleep(void)
{
    asm volatile (
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
    );
    enable_irq();
}
#elif CONFIG_CPU == DM320
static inline void core_sleep(void)
{
    asm volatile (
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
    );
    enable_irq();
}
#else
static inline void core_sleep(void)
{
    #warning core_sleep not implemented, battery life will be decreased
    enable_irq();
}
#endif /* CONFIG_CPU == */
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
    "start_thread:            \n" /* Start here - no naked attribute */
        "move.l %a0, %macsr   \n" /* Set initial mac status reg */
        "lea.l  48(%a1), %a1  \n"
        "move.l (%a1)+, %sp   \n" /* Set initial stack */
        "move.l (%a1), %a2    \n" /* Fetch thread function pointer */
        "clr.l  (%a1)         \n" /* Mark thread running */
        "jsr    (%a2)         \n" /* Call thread function */
    );
    thread_exit();
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0] = (uint32_t)&(thread)->context,     \
       (thread)->context.d[1] = (uint32_t)start_thread,           \
       (thread)->context.start = (uint32_t)(function); })
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l  52(%0), %%d0                   \n" /* Get start address */
        "beq.b   1f                             \n" /* NULL -> already running */
        "movem.l (%0), %%a0-%%a2                \n" /* a0=macsr, a1=context, a2=start_thread */
        "jmp     (%%a2)                         \n" /* Start the thread */
    "1:                                         \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0, %%macsr                  \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* Supervisor mode, interrupts enabled upon wakeup */
    asm volatile ("stop #0x2000");
}
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* r8 = context */
    asm volatile (
    "_start_thread:            \n" /* Start here - no naked attribute */
        "mov.l  @(4, r8), r0   \n" /* Fetch thread function pointer */
        "mov.l  @(28, r8), r15 \n" /* Set initial sp */
        "mov    #0, r1         \n" /* Start the thread */
        "jsr    @r0            \n"
        "mov.l  r1, @(36, r8)  \n" /* Clear start address */
    );
    thread_exit();
}

/* Place context pointer in r8 slot, function pointer in r9 slot, and
 * start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function),         \
       (thread)->context.start = (uint32_t)start_thread; })
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add    #36, %0   \n" /* Start at last reg. By the time routine */
        "sts.l  pr, @-%0  \n" /* is done, %0 will have the original value */
        "mov.l  r15, @-%0 \n"
        "mov.l  r14, @-%0 \n"
        "mov.l  r13, @-%0 \n"
        "mov.l  r12, @-%0 \n"
        "mov.l  r11, @-%0 \n"
        "mov.l  r10, @-%0 \n"
        "mov.l  r9, @-%0  \n"
        "mov.l  r8, @-%0  \n"
        : : "r" (addr));
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @(36, %0), r0 \n" /* Get start address */
        "tst    r0, r0        \n"
        "bt     .running      \n" /* NULL -> already running */
        "jmp    @r0           \n" /* r8 = context */
    ".running:                \n"
        "mov.l  @%0+, r8      \n" /* Executes in delay slot and outside it */
        "mov.l  @%0+, r9      \n"
        "mov.l  @%0+, r10     \n"
        "mov.l  @%0+, r11     \n"
        "mov.l  @%0+, r12     \n"
        "mov.l  @%0+, r13     \n"
        "mov.l  @%0+, r14     \n"
        "mov.l  @%0+, r15     \n"
        "lds.l  @%0+, pr      \n"
        : : "r" (addr) : "r0" /* only! */
    );
}
/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    asm volatile (
        "and.b  #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
        "mov    #0, r1            \n" /* Enable interrupts */
        "ldc    r1, sr            \n" /* Following instruction cannot be interrupted */
        "sleep                    \n" /* Execute standby */
        : : "z"(&SBYCR-GBR) : "r1");
}

#endif /* CONFIG_CPU == */

/****************************************************************************
 * End Processor-specific section
 ***************************************************************************/
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}

static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}

#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else
#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })
#endif

/* RTR list */
#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })
#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))

#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))

#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif
/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *    +->+---+->+---+->+---+->+---+--+
 *    |                              |
 *    +------------------------------+
 *---------------------------------------------------------------------------
 */
/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}
/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == next)
    {
        /* The only item */
        *list = NULL;
        return;
    }

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    next->l.prev = prev;
    prev->l.next = next;
}
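/* Usage sketch (illustrative, not part of the build): effect of the two
 * helpers above on a small list; "a" and "b" are hypothetical threads.
 *
 *     struct thread_entry *list = NULL;
 *     add_to_list_l(&list, a);       // a links to itself, *list == a
 *     add_to_list_l(&list, b);       // circular: a <-> b, head stays a
 *     remove_from_list_l(&list, a);  // head becomes b
 *     remove_from_list_l(&list, b);  // *list == NULL again
 */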
/*---------------------------------------------------------------------------
 * Timeout list structure - circular reverse (to make "remove item" O(1)),
 * NULL-terminated forward (to ease the far more common forward traversal):
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *       +---+->+---+->+---+->+---+-X
 *---------------------------------------------------------------------------
 */
/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
    THREAD_ASSERT(thread->tmo.prev == NULL,
                  "add_to_list_tmo->already listed", thread);

    thread->tmo.next = NULL;

    if (tmo == NULL)
    {
        /* Insert into unoccupied list */
        thread->tmo.prev = thread;
        cores[IF_COP_CORE(thread->core)].timeout = thread;
        return;
    }

    /* Insert last */
    thread->tmo.prev = tmo->tmo.prev;
    tmo->tmo.prev->tmo.next = thread;
    tmo->tmo.prev = thread;
}
/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is cancelled.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
    struct thread_entry *prev = thread->tmo.prev;
    struct thread_entry *next = thread->tmo.next;

    THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);

    if (next != NULL)
        next->tmo.prev = prev;

    if (thread == *list)
    {
        /* List becomes next item and empty if next == NULL */
        *list = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
    else
    {
        if (next == NULL)
            (*list)->tmo.prev = prev;
        prev->tmo.next = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
}
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Priority distribution structure (one category for each possible priority):
 *
 *       +----+----+----+ ... +-----+
 * hist: | F0 | F1 | F2 |     | F31 |
 *       +----+----+----+ ... +-----+
 * mask: | b0 | b1 | b2 |     | b31 |
 *       +----+----+----+ ... +-----+
 *
 * F = count of threads at priority category n (frequency)
 * b = bitmask of non-zero priority categories (occupancy)
 *
 *        / if H[n] != 0 : 1
 * b[n] = |
 *        \ else         : 0
 *
 * A worked example follows this comment block.
 *---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
 *
 * Mn = mutex n, Tn = thread n
 *
 * A lower priority thread inherits the priority of the highest priority
 * thread blocked waiting for it to complete an action (such as release a
 * mutex or respond to a message via queue_send):
 *
 * 1) M1->T1
 *
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1, then T1 inherits the priority of T2.
 *
 * 2) M1->T1
 *
 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
 * T1 inherits the higher of T2 and T3.
 *
 * 3) T3->M2->T2->M1->T1
 *
 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
 * then T1 inherits the priority of T3 through T2.
 *
 * Blocking chains can grow arbitrarily complex (though it's best that they
 * not form at all very often :) and build up from these units.
 *---------------------------------------------------------------------------
 */
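/* Worked example (illustrative): three threads at priorities 3, 3 and 7
 * give hist[3] = 2, hist[7] = 1 and mask = (1 << 3) | (1 << 7) = 0x88.
 * The highest runnable priority (lowest set bit, since lower numbers mean
 * higher priority) is then find_first_set_bit(0x88) == 3. */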
/*---------------------------------------------------------------------------
 * Increment frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_add_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;
    /* Enough size/instruction count difference for ARM makes it worth it to
     * use different code (192 bytes for ARM). Only thing better is ASM. */
#ifdef CPU_ARM
    count = pd->hist[priority];
    if (++count == 1)
        pd->mask |= 1 << priority;
    pd->hist[priority] = count;
#else /* This one's better for Coldfire */
    if ((count = ++pd->hist[priority]) == 1)
        pd->mask |= 1 << priority;
#endif

    return count;
}
/*---------------------------------------------------------------------------
 * Decrement frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_subtract_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;

#ifdef CPU_ARM
    count = pd->hist[priority];
    if (--count == 0)
        pd->mask &= ~(1 << priority);
    pd->hist[priority] = count;
#else
    if ((count = --pd->hist[priority]) == 0)
        pd->mask &= ~(1 << priority);
#endif

    return count;
}
/*---------------------------------------------------------------------------
 * Remove from one category and add to another
 *---------------------------------------------------------------------------
 */
static inline void prio_move_entry(
    struct priority_distribution *pd, int from, int to)
{
    uint32_t mask = pd->mask;

#ifdef CPU_ARM
    unsigned int count;

    count = pd->hist[from];
    if (--count == 0)
        mask &= ~(1 << from);
    pd->hist[from] = count;

    count = pd->hist[to];
    if (++count == 1)
        mask |= 1 << to;
    pd->hist[to] = count;
#else
    if (--pd->hist[from] == 0)
        mask &= ~(1 << from);

    if (++pd->hist[to] == 1)
        mask |= 1 << to;
#endif

    pd->mask = mask;
}
/*---------------------------------------------------------------------------
 * Change the priority and rtr entry for a running thread
 *---------------------------------------------------------------------------
 */
static inline void set_running_thread_priority(
    struct thread_entry *thread, int priority)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    RTR_LOCK(core);
    rtr_move_entry(core, thread->priority, priority);
    thread->priority = priority;
    RTR_UNLOCK(core);
}
/*---------------------------------------------------------------------------
 * Finds the highest priority thread in a list of threads. If the list is
 * empty, PRIORITY_IDLE is returned.
 *
 * It is possible to use the struct priority_distribution within an object
 * instead of scanning the remaining threads in the list but as a compromise,
 * the resulting per-object memory overhead is saved at a slight speed
 * penalty under high contention.
 *---------------------------------------------------------------------------
 */
static int find_highest_priority_in_list_l(
    struct thread_entry * const thread)
{
    if (thread != NULL)
    {
        /* Go through the list until ending up at the initial thread */
        int highest_priority = thread->priority;
        struct thread_entry *curr = thread;

        do
        {
            int priority = curr->priority;

            if (priority < highest_priority)
                highest_priority = priority;

            curr = curr->l.next;
        }
        while (curr != thread);

        return highest_priority;
    }

    return PRIORITY_IDLE;
}
/*---------------------------------------------------------------------------
 * Register priority with blocking system and bubble it down the chain if
 * any until we reach the end or something is already equal or higher.
 *
 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
 * targets but that same action also guarantees a circular block anyway and
 * those are prevented, right? :-)
 *---------------------------------------------------------------------------
 */
static struct thread_entry *
    blocker_inherit_priority(struct thread_entry *current)
{
    const int priority = current->priority;
    struct blocker *bl = current->blocker;
    struct thread_entry * const tstart = current;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since the object protection is held */
    LOCK_THREAD(bl_t);

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority >= bl_pr)
            break; /* Object priority already high enough */

        bl->priority = priority;

        /* Add this one */
        prio_add_entry(&bl_t->pdist, priority);

        if (bl_pr < PRIORITY_IDLE)
        {
            /* Not first waiter - subtract old one */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
        }

        if (priority >= bl_t->priority)
            break; /* Thread priority high enough */

        if (bl_t->state == STATE_RUNNING)
        {
            /* Blocking thread is a running thread therefore there are no
             * further blockers. Change the "run queue" on which it
             * resides. */
            set_running_thread_priority(bl_t, priority);
            break;
        }

        bl_t->priority = priority;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (next == tstart)
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(current);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (bl->thread == next)
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        current = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    return current;
}
/*---------------------------------------------------------------------------
 * Readjust priorities when waking a thread blocked waiting for another
 * in essence "releasing" the thread's effect on the object owner. Can be
 * performed from any context.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread)
{
    const int priority = thread->priority;
    struct blocker *bl = thread->blocker;
    struct thread_entry * const tstart = thread;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since object will be locked */
    LOCK_THREAD(bl_t);

    thread->blocker = NULL; /* Thread not blocked */

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority > bl_pr)
            break; /* Object priority higher */

        next = *thread->bqp;

        if (next == NULL)
        {
            /* No more threads in queue */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
            bl->priority = PRIORITY_IDLE;
        }
        else
        {
            /* Check list for highest remaining priority */
            int queue_pr = find_highest_priority_in_list_l(next);

            if (queue_pr == bl_pr)
                break; /* Object priority not changing */

            /* Change queue priority */
            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
            bl->priority = queue_pr;
        }

        if (bl_pr > bl_t->priority)
            break; /* thread priority is higher */

        bl_pr = find_first_set_bit(bl_t->pdist.mask);

        if (bl_pr == bl_t->priority)
            break; /* Thread priority not changing */

        if (bl_t->state == STATE_RUNNING)
        {
            /* No further blockers */
            set_running_thread_priority(bl_t, bl_pr);
            break;
        }

        bl_t->priority = bl_pr;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (next == tstart)
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(thread);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (bl->thread == next)
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        thread = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

#if NUM_CORES > 1
    if (thread != tstart)
    {
        /* Relock original if it changed */
        LOCK_THREAD(tstart);
    }
#endif

    return cores[CURRENT_CORE].running;
}
/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
 * blocking chains may only unblock from the very end.
 *
 * Only the owning thread itself may call this and so the assumption that
 * it is the running thread is made.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner */
    struct blocker *bl = thread->blocker;
    struct thread_entry *bl_t = bl->thread;
    struct thread_entry *next;
    int bl_pr;

    THREAD_ASSERT(thread_get_current() == bl_t,
                  "UPPT->wrong thread", thread_get_current());

    LOCK_THREAD(bl_t);

    bl_pr = bl->priority;

    /* Remove the object's boost from the owning thread */
    if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
        bl_pr <= bl_t->priority)
    {
        /* No more threads at this priority are waiting and the old level is
         * at least the thread level */
        int priority = find_first_set_bit(bl_t->pdist.mask);

        if (priority != bl_t->priority)
        {
            /* Adjust this thread's priority */
            set_running_thread_priority(bl_t, priority);
        }
    }

    next = *thread->bqp;

    if (next == NULL)
    {
        /* Expected shortcut - no more waiters */
        bl_pr = PRIORITY_IDLE;
    }
    else
    {
        if (thread->priority <= bl_pr)
        {
            /* Need to scan threads remaining in queue */
            bl_pr = find_highest_priority_in_list_l(next);
        }

        if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
            bl_pr < thread->priority)
        {
            /* Thread priority must be raised */
            thread->priority = bl_pr;
        }
    }

    bl->thread = thread;    /* This thread pwns */
    bl->priority = bl_pr;   /* Save highest blocked priority */
    thread->blocker = NULL; /* Thread not blocked */

    UNLOCK_THREAD(bl_t);

    return thread;
}
/*---------------------------------------------------------------------------
 * No threads must be blocked waiting for this thread except for it to exit.
 * The alternative is more elaborate cleanup and object registration code.
 * Check this for risk of silent data corruption when objects with
 * inheritable blocking are abandoned by the owner - not precise but may
 * catch something.
 *---------------------------------------------------------------------------
 */
static void check_for_obj_waiters(const char *function,
                                  struct thread_entry *thread)
{
    /* Only one bit in the mask should be set with a frequency of 1 which
     * represents the thread's own base priority; mask & (mask - 1) clears
     * the lowest set bit and is therefore nonzero iff more than one bit
     * is set */
    uint32_t mask = thread->pdist.mask;
    if ((mask & (mask - 1)) != 0 ||
        thread->pdist.hist[find_first_set_bit(mask)] > 1)
    {
        unsigned char name[32];
        thread_get_name(name, 32, thread);
        panicf("%s->%s with obj. waiters", function, name);
    }
}
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
 * Move a thread back to a running state on its core.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);

    RTR_LOCK(core);

    thread->state = STATE_RUNNING;

    add_to_list_l(&cores[core].running, thread);
    rtr_add_entry(core, thread->priority);

    RTR_UNLOCK(core);

#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}
/*---------------------------------------------------------------------------
 * Check the core's timeout list when at least one thread is due to wake.
 * Filtering for the condition is done before making the call. Resets the
 * tick when the next check will occur.
 *---------------------------------------------------------------------------
 */
void check_tmo_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *next = cores[core].timeout;

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */

    /* Break the loop once we have walked through the list of all
     * sleeping processes or have removed them all. */
    while (next != NULL)
    {
        /* Check sleeping threads. Allow interrupts between checks. */
        enable_irq();

        struct thread_entry *curr = next;

        next = curr->tmo.next;

        /* Lock thread slot against explicit wakeup */
        disable_irq();
        LOCK_THREAD(curr);

        unsigned state = curr->state;

        if (state < TIMEOUT_STATE_FIRST)
        {
            /* Cleanup threads no longer on a timeout but still on the
             * list. */
            remove_from_list_tmo(curr);
        }
        else if (TIME_BEFORE(tick, curr->tmo_tick))
        {
            /* Timeout still pending - this will be the usual case */
            if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
            {
                /* Earliest timeout found so far - move the next check up
                   to its time */
                next_tmo_check = curr->tmo_tick;
            }
        }
        else
        {
            /* Sleep timeout has been reached so bring the thread back to
             * life again. */
            if (state == STATE_BLOCKED_W_TMO)
            {
#if NUM_CORES > 1
                /* Lock the waiting thread's kernel object */
                struct corelock *ocl = curr->obj_cl;

                if (corelock_try_lock(ocl) == 0)
                {
                    /* Need to retry in the correct order though the need is
                     * unlikely */
                    UNLOCK_THREAD(curr);
                    corelock_lock(ocl);
                    LOCK_THREAD(curr);

                    if (curr->state != STATE_BLOCKED_W_TMO)
                    {
                        /* Thread was woken or removed explicitly while slot
                         * was unlocked */
                        corelock_unlock(ocl);
                        remove_from_list_tmo(curr);
                        UNLOCK_THREAD(curr);
                        continue;
                    }
                }
#endif /* NUM_CORES */

                remove_from_list_l(curr->bqp, curr);

#ifdef HAVE_WAKEUP_EXT_CB
                if (curr->wakeup_ext_cb != NULL)
                    curr->wakeup_ext_cb(curr);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
                if (curr->blocker != NULL)
                    wakeup_priority_protocol_release(curr);
#endif
                corelock_unlock(ocl);
            }
            /* else state == STATE_SLEEPING */

            remove_from_list_tmo(curr);

            RTR_LOCK(core);

            curr->state = STATE_RUNNING;

            add_to_list_l(&cores[core].running, curr);
            rtr_add_entry(core, curr->priority);

            RTR_UNLOCK(core);
        }

        UNLOCK_THREAD(curr);
    }

    cores[core].next_tmo_check = next_tmo_check;
}
/*---------------------------------------------------------------------------
 * Performs operations that must be done before blocking a thread but after
 * the state is saved.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
{
    struct thread_blk_ops *ops = &cores[core].blk_ops;
    const unsigned flags = ops->flags;

    if (flags == TBOP_CLEAR)
        return;

    switch (flags)
    {
    case TBOP_SWITCH_CORE:
        core_switch_blk_op(core, thread);
        /* Fall-through */
    case TBOP_UNLOCK_CORELOCK:
        corelock_unlock(ops->cl_p);
        break;
    }

    ops->flags = TBOP_CLEAR;
}
#endif /* NUM_CORES > 1 */
#ifdef RB_PROFILE
void profile_thread(void)
{
    profstart(cores[CURRENT_CORE].running - threads);
}
#endif /* RB_PROFILE */
/*---------------------------------------------------------------------------
 * Prepares a thread to block on an object's list and/or for a specified
 * duration - expects object and slot to be appropriately locked if needed
 * and interrupts to be masked.
 *---------------------------------------------------------------------------
 */
static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
{
    /* If inlined, unreachable branches will be pruned with no size penalty
     * because state is passed as a constant parameter. */
    const unsigned int core = IF_COP_CORE(thread->core);

    /* Remove the thread from the list of running threads. */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, thread);
    rtr_subtract_entry(core, thread->priority);
    RTR_UNLOCK(core);

    /* Add a timeout to the block if not infinite */
    switch (state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Put the thread into a new list of inactive threads. */
        add_to_list_l(thread->bqp, thread);

        if (state == STATE_BLOCKED)
            break;

        /* Fall-through */
    case STATE_SLEEPING:
        /* If this thread times out sooner than any other thread, update
           next_tmo_check to its timeout */
        if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
        {
            cores[core].next_tmo_check = thread->tmo_tick;
        }

        if (thread->tmo.prev == NULL)
        {
            add_to_list_tmo(thread);
        }
        /* else thread was never removed from list - just keep it there */
        break;
    }

    /* Remember the next thread about to block. */
    cores[core].block_task = thread;

    /* Report new state. */
    thread->state = state;
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion for any given priority. Any thread
 * that removed itself from the running list first must specify itself in
 * the parameter.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void switch_thread(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *block = cores[core].block_task;
    struct thread_entry *thread = cores[core].running;

    /* Get context to save - next thread to run is unknown until all wakeups
     * are evaluated */
    if (block != NULL)
    {
        cores[core].block_task = NULL;

#if NUM_CORES > 1
        if (thread == block)
        {
            /* This was the last thread running and another core woke us before
             * reaching here. Force next thread selection to give tmo threads or
             * other threads woken before this block a first chance. */
            block = NULL;
        }
        else
#endif
        {
            /* Blocking task is the old one */
            thread = block;
        }
    }

#ifdef RB_PROFILE
    profile_thread_stopped(thread - threads);
#endif

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    store_context(&thread->context);

    /* Check if the current thread stack is overflown */
    if (thread->stack[0] != DEADBEEF)
        thread_stkov(thread);

#if NUM_CORES > 1
    /* Run any blocking operations requested before switching/sleeping */
    run_blocking_ops(core, thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Reset the value of thread's skip count */
    thread->skip_count = 0;
#endif

    for (;;)
    {
        /* If there are threads on a timeout and the earliest wakeup is due,
         * check the list and wake any threads that need to start running
         * again. */
        if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
        {
            check_tmo_threads();
        }

        disable_irq();
        RTR_LOCK(core);

        thread = cores[core].running;

        if (thread == NULL)
        {
            /* Enter sleep mode to reduce power usage - woken up on interrupt
             * or wakeup request from another core - expected to enable
             * interrupts. */
            RTR_UNLOCK(core);
            core_sleep(IF_COP(core));
        }
        else
        {
#ifdef HAVE_PRIORITY_SCHEDULING
            /* Select the new task based on priorities and the last time a
             * process got CPU time relative to the highest priority runnable
             * task. */
            struct priority_distribution *pd = &cores[core].rtr;
            int max = find_first_set_bit(pd->mask);

            if (block == NULL)
            {
                /* Not switching on a block, tentatively select next thread */
                thread = thread->l.next;
            }

            for (;;)
            {
                int priority = thread->priority;
                int diff;

                /* This ridiculously simple method of aging seems to work
                 * suspiciously well. It does tend to reward CPU hogs (under
                 * yielding) but that's generally not desirable at all. On the
                 * plus side, it, relative to other threads, penalizes excess
                 * yielding which is good if some high priority thread is
                 * performing no useful work such as polling for a device to be
                 * ready. Of course, aging is only employed when higher and lower
                 * priority threads are runnable. The highest priority runnable
                 * thread(s) are never skipped. */
                if (priority <= max ||
                    (diff = priority - max, ++thread->skip_count > diff*diff))
                {
                    cores[core].running = thread;
                    break;
                }

                thread = thread->l.next;
            }
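
            /* Worked example of the aging rule above (illustrative numbers):
             * with max = 5, a runnable priority 7 thread has diff = 2 and is
             * passed over until its skip_count exceeds diff*diff = 4, so
             * starting from a skip_count of zero it is selected on its fifth
             * examination at the latest. A thread at the highest runnable
             * priority (priority <= max) is always eligible immediately. */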
#else
            /* Without priority use a simple FCFS algorithm */
            if (block == NULL)
            {
                /* Not switching on a block, select next thread */
                thread = thread->l.next;
                cores[core].running = thread;
            }
#endif /* HAVE_PRIORITY_SCHEDULING */

            enable_irq();
            break;
        }
    }

    /* And finally give control to the next thread. */
    load_context(&thread->context);

#ifdef RB_PROFILE
    profile_thread_started(thread - threads);
#endif
}

/*---------------------------------------------------------------------------
 * Sleeps a thread for at least a specified number of ticks with zero being
 * a wait until the next tick.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void sleep_thread(int ticks)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    LOCK_THREAD(current);

    /* Set our timeout, remove from run list and join timeout list. */
    current->tmo_tick = current_tick + ticks + 1;
    block_thread_on_l(current, STATE_SLEEPING);

    UNLOCK_THREAD(current);
}
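
/* Note on the "+ 1" above: the request can arrive at any point within the
 * current tick period, so rounding the deadline up by one tick is what
 * guarantees a sleep of at least `ticks` whole ticks; sleep_thread(0)
 * therefore wakes on the next tick boundary. */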

/*---------------------------------------------------------------------------
 * Indefinitely block a thread on a blocking queue for explicit wakeup.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread(struct thread_entry *current)
{
    /* Set the state to blocked and take us off of the run queue until we
     * are explicitly woken */
    LOCK_THREAD(current);

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Block a thread on a blocking queue for a specified time interval or until
 * explicitly woken - whichever happens first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread_w_tmo(struct thread_entry *current, int timeout)
{
    /* Get the entry for the current running thread. */
    LOCK_THREAD(current);

    /* Set the state to blocked with the specified timeout */
    current->tmo_tick = current_tick + timeout;

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED_W_TMO);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Explicitly wakeup a thread on a blocking queue. Only affects threads of
 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
 *
 * This code should be considered a critical section by the caller meaning
 * that the object's corelock should be held.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread = *list;
    unsigned int result = THREAD_NONE;

    /* Check if there is a blocked thread at all. */
    if (thread == NULL)
        return result;

    LOCK_THREAD(thread);

    /* Determine thread's current state. */
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        remove_from_list_l(list, thread);

        result = THREAD_OK;

#ifdef HAVE_PRIORITY_SCHEDULING
        struct thread_entry *current;
        struct blocker *bl = thread->blocker;

        if (bl == NULL)
        {
            /* No inheritance - just boost the thread by aging */
            thread->skip_count = thread->priority;
            current = cores[CURRENT_CORE].running;
        }
        else
        {
            /* Call the specified unblocking PIP */
            current = bl->wakeup_protocol(thread);
        }

        if (current != NULL && thread->priority < current->priority
            IF_COP( && thread->core == current->core ))
        {
            /* Woken thread is higher priority and exists on the same CPU core;
             * recommend a task switch. Knowing if this is an interrupt call
             * would be helpful here. */
            result |= THREAD_SWITCH;
        }
#endif /* HAVE_PRIORITY_SCHEDULING */

        core_schedule_wakeup(thread);
        break;

    /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
    default:
        THREAD_PANICF("wakeup_thread->block invalid", thread);
    case STATE_RUNNING:
    case STATE_KILLED:
        break;
#endif
    }

    UNLOCK_THREAD(thread);
    return result;
}
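
/* Illustrative caller pattern (the kernel object `q` and its fields are
 * hypothetical, not part of this file): with the object's corelock held,
 * wake one waiter and honor a switch recommendation afterwards.
 *
 *     unsigned int rc = wakeup_thread(&q->queue);
 *     corelock_unlock(&q->cl);
 *     if (rc & THREAD_SWITCH)
 *         switch_thread();
 */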

/*---------------------------------------------------------------------------
 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
 * from each operation or THREAD_NONE if nothing was awakened. Object owning
 * the queue must be locked first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break; /* No more threads */

        result |= rc;
    }

    return result;
}

/*---------------------------------------------------------------------------
 * Find an empty thread slot or NULL if none is found. The slot returned
 * will be locked on multicore.
 *---------------------------------------------------------------------------
 */
static struct thread_entry * find_empty_thread_slot(void)
{
    /* Any slot could be on an interrupt-accessible list */
    IF_COP( int oldlevel = disable_irq_save(); )
    struct thread_entry *thread = NULL;
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        /* Obtain current slot state - lock it on multicore */
        struct thread_entry *t = &threads[n];
        LOCK_THREAD(t);

        if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
        {
            /* Slot is empty - leave it locked and caller will unlock */
            thread = t;
            break;
        }

        /* Finished examining slot - no longer busy - unlock on multicore */
        UNLOCK_THREAD(t);
    }

    IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
                                        not accessible to them yet */
    return thread;
}

/*---------------------------------------------------------------------------
 * Place the current core in idle mode - woken up on interrupt or wake
 * request from another core.
 *---------------------------------------------------------------------------
 */
void core_idle(void)
{
    IF_COP( const unsigned int core = CURRENT_CORE; )
    disable_irq();
    core_sleep(IF_COP(core));
}

/*---------------------------------------------------------------------------
 * Create a thread. If using a dual core architecture, specify which core to
 * start the thread on.
 *
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, size_t stack_size,
                  unsigned flags, const char *name
                  IF_PRIO(, int priority)
                  IF_COP(, unsigned int core))
{
    unsigned int i;
    unsigned int stack_words;
    uintptr_t stackptr, stackend;
    struct thread_entry *thread;
    unsigned state;
    int oldlevel;

    thread = find_empty_thread_slot();
    if (thread == NULL)
    {
        return NULL;
    }

    oldlevel = disable_irq_save();

    /* Munge the stack to make it easy to spot stack overflows */
    stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
    stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
    stack_size = stackend - stackptr;
    stack_words = stack_size / sizeof (uintptr_t);

    for (i = 0; i < stack_words; i++)
    {
        ((uintptr_t *)stackptr)[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread->name = name;
    thread->stack = (uintptr_t *)stackptr;
    thread->stack_size = stack_size;
    thread->queue = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->cpu_boost = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    memset(&thread->pdist, 0, sizeof(thread->pdist));
    thread->blocker = NULL;
    thread->base_priority = priority;
    thread->priority = priority;
    thread->skip_count = priority;
    prio_add_entry(&thread->pdist, priority);
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        flush_icache();
    }
#endif

    /* Thread is not on any timeout list but be a bit paranoid */
    thread->tmo.prev = NULL;

    state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;

    thread->context.sp = (typeof (thread->context.sp))stackend;

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    thread->state = state;

    if (state == STATE_RUNNING)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return thread;
}
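
/* Usage sketch (illustrative only - the stack buffer, entry function, name
 * and priority below are hypothetical, not part of this file):
 *
 *     static long demo_stack[0x400/sizeof(long)];
 *     static void demo_main(void);
 *
 *     struct thread_entry *t =
 *         create_thread(demo_main, demo_stack, sizeof (demo_stack),
 *                       0, "demo" IF_PRIO(, PRIORITY_BACKGROUND)
 *                       IF_COP(, CPU));
 *
 * Passing CREATE_THREAD_FROZEN in flags leaves the new thread in
 * STATE_FROZEN until thread_thaw is called on it (see below). */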

#ifdef HAVE_SCHEDULER_BOOSTCTRL
/*---------------------------------------------------------------------------
 * Change the boost state of a thread boosting or unboosting the CPU
 * as required.
 *---------------------------------------------------------------------------
 */
static inline void boost_thread(struct thread_entry *thread, bool boost)
{
    if ((thread->cpu_boost != 0) != boost)
    {
        thread->cpu_boost = boost;
        cpu_boost(boost);
    }
}

void trigger_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, true);
}

void cancel_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, false);
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */

/*---------------------------------------------------------------------------
 * Block the current thread until another thread terminates. A thread may
 * wait on itself to terminate which prevents it from running again and it
 * will need to be killed externally.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void thread_wait(struct thread_entry *thread)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    /* Lock thread-as-waitable-object lock */
    corelock_lock(&thread->waiter_cl);

    /* Be sure it hasn't been killed yet */
    if (thread->state != STATE_KILLED)
    {
        IF_COP( current->obj_cl = &thread->waiter_cl; )
        current->bqp = &thread->queue;

        disable_irq();
        block_thread(current);

        corelock_unlock(&thread->waiter_cl);

        switch_thread();
        return;
    }

    corelock_unlock(&thread->waiter_cl);
}

/*---------------------------------------------------------------------------
 * Exit the current thread. The Right Way to Do Things (TM).
 *---------------------------------------------------------------------------
 */
void thread_exit(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    /* Cancel CPU boost if any */
    cancel_cpu_boost();

    disable_irq();

    corelock_lock(&current->waiter_cl);
    LOCK_THREAD(current);

#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(current);
        corelock_unlock(&current->waiter_cl);
        thread_wait(current);
        THREAD_PANICF("thread_exit->WK:*R", current);
    }
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("thread_exit", current);
#endif

    if (current->tmo.prev != NULL)
    {
        /* Cancel pending timeout list removal */
        remove_from_list_tmo(current);
    }

    /* Switch tasks and never return */
    block_thread_on_l(current, STATE_KILLED);

#if NUM_CORES > 1
    /* Switch to the idle stack if not on the main core (where "main"
     * runs) - we can hope gcc doesn't need the old stack beyond this
     * point. */
    if (core != CPU)
    {
        switch_to_idle_stack(core);
    }
#endif

    current->name = NULL;

    /* Signal this thread */
    thread_queue_wake(&current->queue);
    corelock_unlock(&current->waiter_cl);
    /* Slot must be unusable until thread is really gone */
    UNLOCK_THREAD_AT_TASK_SWITCH(current);

    switch_thread();

    /* This should never and must never be reached - if it is, the
     * state is corrupted */
    THREAD_PANICF("thread_exit->K:*R", current);
}

#ifdef ALLOW_REMOVE_THREAD
/*---------------------------------------------------------------------------
 * Remove a thread from the scheduler. Not The Right Way to Do Things in
 * normal code.
 *
 * Parameter is the ID as returned from create_thread().
 *
 * Use with care on threads that are not under careful control as this may
 * leave various objects in an undefined state.
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
#if NUM_CORES > 1
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
    struct corelock *ocl = NULL;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    struct thread_entry *current = cores[core].running;

    unsigned state;
    int oldlevel;

    if (thread == current)
        thread_exit(); /* Current thread - do normal exit */

    oldlevel = disable_irq_save();

    corelock_lock(&thread->waiter_cl);
    LOCK_THREAD(thread);

    state = thread->state;

    if (state == STATE_KILLED)
    {
        goto thread_killed;
    }

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(thread);
        corelock_unlock(&thread->waiter_cl);
        restore_irq(oldlevel);
        thread_wait(thread);
        return;
    }

    thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("remove_thread", thread);
#endif

    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur which means other
         * threads have to be guided into becoming thread waiters if they
         * attempt to remove it. */
        unsigned int new_core = thread->core;

        corelock_unlock(&thread->waiter_cl);
        UNLOCK_THREAD(thread);
        restore_irq(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = disable_irq_save();

        corelock_lock(&thread->waiter_cl);
        LOCK_THREAD(thread);

        state = thread->state;
        core = new_core;
        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
         * run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Cancel CPU boost if any */
    boost_thread(thread, false);
#endif

IF_COP( retry_state: )

    switch (state)
    {
    case STATE_RUNNING:
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        rtr_subtract_entry(core, thread->priority);
        break;

    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#if NUM_CORES > 1
        if (&thread->waiter_cl != thread->obj_cl)
        {
            ocl = thread->obj_cl;

            if (corelock_try_lock(ocl) == 0)
            {
                UNLOCK_THREAD(thread);
                corelock_lock(ocl);
                LOCK_THREAD(thread);

                if (thread->state != state)
                {
                    /* Something woke the thread */
                    state = thread->state;
                    corelock_unlock(ocl);
                    goto retry_state;
                }
            }
        }
#endif /* NUM_CORES > 1 */

        remove_from_list_l(thread->bqp, thread);

#ifdef HAVE_WAKEUP_EXT_CB
        if (thread->wakeup_ext_cb != NULL)
            thread->wakeup_ext_cb(thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
        if (thread->blocker != NULL)
        {
            /* Remove thread's priority influence from its chain */
            wakeup_priority_protocol_release(thread);
        }
#endif

#if NUM_CORES > 1
        if (ocl != NULL)
            corelock_unlock(ocl);
#endif
        break;

    /* Otherwise thread is frozen and hasn't run yet */
    }

    thread->state = STATE_KILLED;

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake(&thread->queue);

    thread->name = NULL;

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock and reenable interrupts */
    corelock_unlock(&thread->waiter_cl);
    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

#if NUM_CORES > 1
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           the original core */
        switch_core(old_core);
    }
#endif
}
#endif /* ALLOW_REMOVE_THREAD */

#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative base priority for the core it runs on. Any
 * needed inheritance changes also may happen.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_base_priority = -1;

    /* A little safety measure */
    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
        return -1;

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* Make sure it's not killed */
    if (thread->state != STATE_KILLED)
    {
        int old_priority = thread->priority;

        old_base_priority = thread->base_priority;
        thread->base_priority = priority;

        prio_move_entry(&thread->pdist, old_base_priority, priority);
        priority = find_first_set_bit(thread->pdist.mask);

        if (old_priority == priority)
        {
            /* No priority change - do nothing */
        }
        else if (thread->state == STATE_RUNNING)
        {
            /* This thread is running - change location on the run
             * queue. No transitive inheritance needed. */
            set_running_thread_priority(thread, priority);
        }
        else
        {
            thread->priority = priority;

            if (thread->blocker != NULL)
            {
                /* Bubble new priority down the chain */
                struct blocker *bl = thread->blocker;        /* Blocker struct */
                struct thread_entry *bl_t = bl->thread;      /* Blocking thread */
                struct thread_entry * const tstart = thread; /* Initial thread */
                const int highest = MIN(priority, old_priority); /* Higher of new or old */

                for (;;)
                {
                    struct thread_entry *next; /* Next thread to check */
                    int bl_pr;                 /* Highest blocked thread */
                    int queue_pr;              /* New highest blocked thread */

                    /* Owner can change but thread cannot be dislodged - thread
                     * may not be the first in the queue which allows other
                     * threads ahead in the list to be given ownership during the
                     * operation. If thread is next then the waker will have to
                     * wait for us and the owner of the object will remain fixed.
                     * If we successfully grab the owner -- which at some point
                     * is guaranteed -- then the queue remains fixed until we
                     * pass by which locks the object and then we can unlock the
                     * thread. */
                    for (;;)
                    {
                        LOCK_THREAD(bl_t);

                        /* Double-check the owner - retry if it changed */
                        if (bl->thread == bl_t)
                            break;

                        UNLOCK_THREAD(bl_t);
                        bl_t = bl->thread;
                    }

                    bl_pr = bl->priority;

                    if (highest > bl_pr)
                        break; /* Object priority won't change */

                    /* This will include the thread being set */
                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);

                    if (queue_pr == bl_pr)
                        break; /* Object priority not changing */

                    /* Update thread boost for this object */
                    bl->priority = queue_pr;
                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
                    bl_pr = find_first_set_bit(bl_t->pdist.mask);

                    if (bl_t->priority == bl_pr)
                        break; /* Blocking thread priority not changing */

                    if (bl_t->state == STATE_RUNNING)
                    {
                        /* Thread not blocked - we're done */
                        set_running_thread_priority(bl_t, bl_pr);
                        break;
                    }

                    bl_t->priority = bl_pr;
                    bl = bl_t->blocker; /* Blocking thread has a blocker? */

                    if (bl == NULL)
                        break; /* End of chain */

                    next = bl->thread;

                    if (next == tstart)
                        break; /* Full-circle */

                    UNLOCK_THREAD(thread);

                    thread = bl_t;
                    bl_t = next;
                }

                UNLOCK_THREAD(bl_t);
            }
        }
    }

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return old_base_priority;
}
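
/* Worked example of the inheritance walk above (illustrative numbers,
 * remembering that a lower number means a higher priority): suppose a
 * priority 5 thread owns an object and one of its blocked waiters is raised
 * to priority 2 via thread_set_priority. find_highest_priority_in_list_l
 * then reports 2 for the object's queue, so bl->priority drops from 5 to 2,
 * the owner's pdist gains an entry at 2, and the owner runs at priority 2
 * until it releases the object. If that owner is itself blocked on another
 * object, the loop repeats down the chain. */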

/*---------------------------------------------------------------------------
 * Returns the current base priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(struct thread_entry *thread)
{
    /* Simple, quick probe. */
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */

/*---------------------------------------------------------------------------
 * Starts a frozen thread - similar semantics to wakeup_thread except that
 * the thread is on no scheduler or wakeup queue at all. It exists simply by
 * virtue of the slot having a state of STATE_FROZEN.
 *---------------------------------------------------------------------------
 */
void thread_thaw(struct thread_entry *thread)
{
    int oldlevel = disable_irq_save();
    LOCK_THREAD(thread);

    if (thread->state == STATE_FROZEN)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);
}
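
/* Usage sketch (illustrative; `demo_main` and `demo_stack` are hypothetical):
 * a thread created with CREATE_THREAD_FROZEN stays in STATE_FROZEN and
 * consumes no CPU until it is explicitly started.
 *
 *     struct thread_entry *t =
 *         create_thread(demo_main, demo_stack, sizeof (demo_stack),
 *                       CREATE_THREAD_FROZEN, "demo"
 *                       IF_PRIO(, PRIORITY_BACKGROUND) IF_COP(, CPU));
 *     ...
 *     thread_thaw(t);  - thread is now eligible to run
 */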

/*---------------------------------------------------------------------------
 * Return the ID of the currently executing thread.
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}

#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if (core == new_core)
    {
        /* No change - just return same core */
        return core;
    }

    int oldlevel = disable_irq_save();
    LOCK_THREAD(current);

    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - deactivate and let process complete */
        UNLOCK_THREAD(current);
        restore_irq(oldlevel);
        thread_wait(current);
        /* Should never be reached */
        THREAD_PANICF("switch_core->D:*R", current);
    }

    /* Get us off the running list for the current core */
    remove_from_list_l(&cores[core].running, current);
    rtr_subtract_entry(core, current->priority);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned-up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;

    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the list unlock to keep the other core stuck
     * until this thread is ready. */
    corelock_lock(&cores[new_core].rtr_cl);

    rtr_add_entry(new_core, current->priority);
    add_to_list_l(&cores[new_core].running, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
    cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
    cores[core].block_task = current;

    UNLOCK_THREAD(current);

    /* Alert other core to activity */
    core_wake(new_core);

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

    /* Finally return the old core to caller */
    return current->retval;
}
#endif /* NUM_CORES > 1 */
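
/* Illustrative call pattern (mirrors remove_thread above): hop to another
 * processor, do the work there, then hop back. COP is assumed here to name
 * the coprocessor on dual-core targets; the work in the middle is whatever
 * the caller needs done on that core.
 *
 *     unsigned int old_core = switch_core(COP);
 *     ... cross-core work ...
 *     switch_core(old_core);
 */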

/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;

    /* CPU will initialize first and then sleep */
    thread = find_empty_thread_slot();

    if (thread == NULL)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zero'ed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }

    /* Initialize initially non-zero members of core */
    cores[core].next_tmo_check = current_tick; /* Something not in the past */

    /* Initialize initially non-zero members of slot */
    UNLOCK_THREAD(thread); /* No sync worries yet */
    thread->name = main_thread_name;
    thread->state = STATE_RUNNING;
    IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
    corelock_init(&cores[core].rtr_cl);
    thread->base_priority = PRIORITY_USER_INTERFACE;
    prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
    thread->priority = PRIORITY_USER_INTERFACE;
    rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif

    corelock_init(&thread->waiter_cl);
    corelock_init(&thread->slot_cl);

    add_to_list_l(&cores[core].running, thread);

    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
        /* Wait for other processors to finish their inits since create_thread
         * isn't safe to call until the kernel inits are done. The first
         * threads created in the system must of course be created by CPU. */
        core_thread_init(CPU);
    }
    else
    {
        /* Initial stack is the idle stack */
        thread->stack = idle_stacks[core];
        thread->stack_size = IDLE_STACK_SIZE;
        /* After last processor completes, it should signal all others to
         * proceed or may signal the next and call thread_exit(). The last one
         * to finish will signal CPU. */
        core_thread_init(core);
        /* Other cores do not have a main thread - go idle inside switch_thread
         * until a thread can run on the core. */
        thread_exit();
#endif /* NUM_CORES */
    }
}

/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES == 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    unsigned int i;
    int usage = 0;

    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
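
/* Worked example: for a 256-word stack where the scan finds the first
 * overwritten (non-DEADBEEF) word at index i = 192, the high-water mark is
 * usage = ((256 - 192) * 100) / 256 = 25%. A stack that was never touched
 * reports 0. */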

/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 * overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    return stack_usage(thread->stack, thread->stack_size);
}

#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif /* NUM_CORES > 1 */

/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";

        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX";
        }

        snprintf(buffer, size, fmt, name);
    }
}