/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/****************************************************************************
 *                                                                          *
 *   See notes below on implementing processor-specific portions!           *
 *                                                                          *
 ***************************************************************************/

/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif
/*
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever waiting for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Core Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 */
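/*
 * Illustrative sketch only (not in the original source): the order above as
 * it would appear in a hypothetical wakeup path. The queue object and its
 * corelock member name are placeholders.
 *
 *   oldlevel = disable_irq_save();      // 1) IRQ
 *   corelock_lock(&q->cl);              // 2) Kernel object
 *   LOCK_THREAD(thread);                // 3) Thread slot
 *   RTR_LOCK(core);                     // 4) Core list (run queue)
 *   ...
 *   RTR_UNLOCK(core);
 *   UNLOCK_THREAD(thread);
 *   corelock_unlock(&q->cl);
 *   restore_irq(oldlevel);
 *
 * Skipping stages is allowed as long as the remaining ones are taken in
 * this order.
 */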
/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others,
 * so usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
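/*
 * Illustrative sketch only (not in the original source), assuming a
 * hypothetical single-core ARMv5-style target: the steps above collapse to
 *
 *   static inline void core_sleep(void)
 *   {
 *       asm volatile ("mcr p15, 0, r0, c7, c0, 4"); // 3) wait for interrupt
 *       enable_irq();                               // 4) reenable interrupts
 *   }                                               // 5) exit
 *
 * The real implementations below add the multicore handshaking of steps
 * 1) and 2) where needed.
 */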
/* Cast to the machine pointer size, whose size could be < 4 or > 32 */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];

static inline void core_sleep(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));

void check_tmo_threads(void)
        __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
        __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
        __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
        __attribute__((noinline));

static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
        __attribute__((always_inline));

static void thread_stkov(struct thread_entry *thread)
        __attribute__((noinline));

static inline void store_context(void* addr)
        __attribute__((always_inline));

static inline void load_context(const void* addr)
        __attribute__((always_inline));

void switch_thread(void)
        __attribute__((noinline));
/****************************************************************************
 * Processor-specific section
 */

#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
/* Support a special workaround object for large-sector disks */
#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
#else
#define IF_NO_SKIP_YIELD(...)
#endif
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked,used)) start_thread(void)
{
    asm volatile (
        "ldr    sp, [r0, #32]          \n" /* Load initial sp */
        "ldr    r4, [r0, #40]          \n" /* start in r4 since it's non-volatile */
        "mov    r1, #0                 \n" /* Mark thread as running */
        "str    r1, [r0, #40]          \n"
        "ldr    r0, =invalidate_icache \n" /* Invalidate this core's cache. */
        "mov    lr, pc                 \n" /* This could be the first entry into */
        "bx     r0                     \n" /* plugin or codec code for this core. */
        "mov    lr, pc                 \n" /* Call thread function */
        "bx     r4                     \n"
    ); /* No clobber list - new thread doesn't care */
    //asm volatile (".ltorg"); /* Dump constant pool */
}

/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)start_thread, \
       (thread)->context.start = (uint32_t)function; })
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
        "cmp     r0, #0                 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}
#if NUM_CORES > 1
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */

#endif /* NUM_CORES */
#if CONFIG_CORELOCK == SW_CORELOCK
/* Software core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}
273 #if 1 /* Assembly locks to minimize overhead */
274 /*---------------------------------------------------------------------------
275 * Wait for the corelock to become free and acquire it when it does.
276 *---------------------------------------------------------------------------
278 void corelock_lock(struct corelock
*cl
) __attribute__((naked
));
279 void corelock_lock(struct corelock
*cl
)
281 /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
283 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
285 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
286 "eor r2, r1, #0xff \n" /* r2 = othercore */
287 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
289 "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
290 "cmp r3, #0 \n" /* yes? lock acquired */
292 "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
294 "bxeq lr \n" /* yes? lock acquired */
295 "b 1b \n" /* keep trying */
296 : : "i"(&PROCESSOR_ID
)
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
305 int corelock_try_lock(struct corelock
*cl
) __attribute__((naked
));
306 int corelock_try_lock(struct corelock
*cl
)
308 /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
310 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
313 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
314 "eor r2, r1, #0xff \n" /* r2 = othercore */
315 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
316 "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
317 "eors r0, r0, r2 \n" /* yes? lock acquired */
319 "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
321 "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
322 "bx lr \n" /* return result */
323 : : "i"(&PROCESSOR_ID
)
330 /*---------------------------------------------------------------------------
331 * Release ownership of the corelock
332 *---------------------------------------------------------------------------
334 void corelock_unlock(struct corelock
*cl
) __attribute__((naked
));
335 void corelock_unlock(struct corelock
*cl
)
338 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
340 "mov r2, #0 \n" /* cl->myl[core] = 0 */
341 "strb r2, [r0, r1, lsr #7] \n"
343 : : "i"(&PROCESSOR_ID
)
#else /* C versions for reference */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    for (;;)
    {
        if (cl->myl[othercore] == 0 || cl->turn == core)
            break;
    }
}

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    if (cl->myl[othercore] == 0 || cl->turn == core)
    {
        return 1;
    }

    cl->myl[core] = 0;
    return 0;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */

#endif /* CONFIG_CORELOCK == SW_CORELOCK */
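/*
 * Illustrative sketch only (not in the original source): a corelock guards a
 * short cross-core critical section, e.g. a hypothetical shared counter:
 *
 *   static struct corelock counter_cl;   // corelock_init()'d at startup
 *   static volatile int shared_counter;
 *
 *   void counter_increment(void)
 *   {
 *       corelock_lock(&counter_cl);      // spin until the other core yields
 *       shared_counter++;
 *       corelock_unlock(&counter_cl);
 *   }
 *
 * Peterson's algorithm needs only ordinary loads and stores, which is what
 * makes a pure-software lock workable when no usable cross-core atomic
 * primitive is available.
 */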
400 /*---------------------------------------------------------------------------
401 * Put core in a power-saving state if waking list wasn't repopulated and if
402 * no other core requested a wakeup for it to perform a task.
403 *---------------------------------------------------------------------------
407 static inline void core_sleep(void)
409 sleep_core(CURRENT_CORE
);
413 static inline void core_sleep(unsigned int core
)
417 "mov r0, #4 \n" /* r0 = 0x4 << core */
418 "mov r0, r0, lsl %[c] \n"
419 "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
420 "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
421 "tst r1, r0, lsl #2 \n"
422 "moveq r1, #0x80000000 \n" /* Then sleep */
423 "streq r1, [%[ctl], %[c], lsl #2] \n"
424 "moveq r1, #0 \n" /* Clear control reg */
425 "streq r1, [%[ctl], %[c], lsl #2] \n"
426 "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
427 "str r1, [%[mbx], #8] \n"
428 "1: \n" /* Wait for wake procedure to finish */
429 "ldr r1, [%[mbx], #0] \n"
430 "tst r1, r0, lsr #2 \n"
433 : [ctl
]"r"(&CPU_CTL
), [mbx
]"r"(MBX_BASE
), [c
]"r"(core
)
435 #else /* C version for reference */
436 /* Signal intent to sleep */
437 MBX_MSG_SET
= 0x4 << core
;
439 /* Something waking or other processor intends to wake us? */
440 if ((MBX_MSG_STAT
& (0x10 << core
)) == 0)
446 /* Signal wake - clear wake flag */
447 MBX_MSG_CLR
= 0x14 << core
;
449 /* Wait for other processor to finish wake procedure */
450 while (MBX_MSG_STAT
& (0x1 << core
));
451 #endif /* ASM/C selection */
454 #endif /* NUM_CORES */
455 #elif CONFIG_CPU == PP5002
457 static inline void core_sleep(void)
459 sleep_core(CURRENT_CORE
);
463 /* PP5002 has no mailboxes - emulate using bytes */
464 static inline void core_sleep(unsigned int core
)
468 "mov r0, #1 \n" /* Signal intent to sleep */
469 "strb r0, [%[sem], #2] \n"
470 "ldrb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
473 /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
474 * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
475 * that the correct alternative is executed. Don't change the order
476 * of the next 4 instructions! */
479 "strne r0, [%[ctl], %[c], lsl #2] \n"
480 "streq r0, [%[ctl], %[c], lsl #2] \n"
481 "nop \n" /* nop's needed because of pipeline */
485 "mov r0, #0 \n" /* Clear stay_awake and sleep intent */
486 "strb r0, [%[sem], #1] \n"
487 "strb r0, [%[sem], #2] \n"
488 "1: \n" /* Wait for wake procedure to finish */
489 "ldrb r0, [%[sem], #0] \n"
493 : [sem
]"r"(&core_semaphores
[core
]), [c
]"r"(core
),
497 #else /* C version for reference */
498 /* Signal intent to sleep */
499 core_semaphores
[core
].intend_sleep
= 1;
501 /* Something waking or other processor intends to wake us? */
502 if (core_semaphores
[core
].stay_awake
== 0)
507 /* Signal wake - clear wake flag */
508 core_semaphores
[core
].stay_awake
= 0;
509 core_semaphores
[core
].intend_sleep
= 0;
511 /* Wait for other processor to finish wake procedure */
512 while (core_semaphores
[core
].intend_wake
!= 0);
515 #endif /* ASM/C selection */
518 #endif /* NUM_CORES */
519 #endif /* PP CPU type */
521 /*---------------------------------------------------------------------------
522 * Wake another processor core that is sleeping or prevent it from doing so
523 * if it was already destined. FIQ, IRQ should be disabled before calling.
524 *---------------------------------------------------------------------------
527 /* Shared single-core build debugging version */
530 /* No wakey - core already wakey */
532 #elif defined (CPU_PP502x)
533 void core_wake(unsigned int othercore
)
536 /* avoid r0 since that contains othercore */
538 "mrs r3, cpsr \n" /* Disable IRQ */
539 "orr r1, r3, #0x80 \n"
541 "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
542 "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
543 "str r2, [%[mbx], #4] \n"
544 "1: \n" /* If it intends to sleep, let it first */
545 "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
546 "eor r1, r1, #0xc \n"
547 "tst r1, r2, lsr #2 \n"
548 "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
549 "tsteq r1, #0x80000000 \n"
550 "beq 1b \n" /* Wait for sleep or wake */
551 "tst r1, #0x80000000 \n" /* If sleeping, wake it */
553 "strne r1, [%[ctl], %[oc], lsl #2] \n"
554 "mov r1, r2, lsr #4 \n"
555 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
556 "msr cpsr_c, r3 \n" /* Restore IRQ */
558 : [ctl
]"r"(&PROC_CTL(CPU
)), [mbx
]"r"(MBX_BASE
),
561 #else /* C version for reference */
562 /* Disable interrupts - avoid reentrancy from the tick */
563 int oldlevel
= disable_irq_save();
565 /* Signal intent to wake other processor - set stay awake */
566 MBX_MSG_SET
= 0x11 << othercore
;
568 /* If it intends to sleep, wait until it does or aborts */
569 while ((MBX_MSG_STAT
& (0x4 << othercore
)) != 0 &&
570 (PROC_CTL(othercore
) & PROC_SLEEP
) == 0);
572 /* If sleeping, wake it up */
573 if (PROC_CTL(othercore
) & PROC_SLEEP
)
574 PROC_CTL(othercore
) = 0;
576 /* Done with wake procedure */
577 MBX_MSG_CLR
= 0x1 << othercore
;
578 restore_irq(oldlevel
);
579 #endif /* ASM/C selection */
581 #elif CONFIG_CPU == PP5002
582 /* PP5002 has no mailboxes - emulate using bytes */
583 void core_wake(unsigned int othercore
)
586 /* avoid r0 since that contains othercore */
588 "mrs r3, cpsr \n" /* Disable IRQ */
589 "orr r1, r3, #0x80 \n"
591 "mov r1, #1 \n" /* Signal intent to wake other core */
592 "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
593 "strh r1, [%[sem], #0] \n"
595 "1: \n" /* If it intends to sleep, let it first */
596 "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
598 "ldr r1, [%[st]] \n" /* && not sleeping ? */
599 "tsteq r1, r2, lsr %[oc] \n"
600 "beq 1b \n" /* Wait for sleep or wake */
601 "tst r1, r2, lsr %[oc] \n"
602 "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
604 "strne r1, [r2, %[oc], lsl #2] \n"
605 "mov r1, #0 \n" /* Done with wake procedure */
606 "strb r1, [%[sem], #0] \n"
607 "msr cpsr_c, r3 \n" /* Restore IRQ */
609 : [sem
]"r"(&core_semaphores
[othercore
]),
614 #else /* C version for reference */
615 /* Disable interrupts - avoid reentrancy from the tick */
616 int oldlevel
= disable_irq_save();
618 /* Signal intent to wake other processor - set stay awake */
619 core_semaphores
[othercore
].intend_wake
= 1;
620 core_semaphores
[othercore
].stay_awake
= 1;
622 /* If it intends to sleep, wait until it does or aborts */
623 while (core_semaphores
[othercore
].intend_sleep
!= 0 &&
624 (PROC_STAT
& PROC_SLEEPING(othercore
)) == 0);
626 /* If sleeping, wake it up */
627 if (PROC_STAT
& PROC_SLEEPING(othercore
))
628 wake_core(othercore
);
630 /* Done with wake procedure */
631 core_semaphores
[othercore
].intend_wake
= 0;
632 restore_irq(oldlevel
);
633 #endif /* ASM/C selection */
635 #endif /* CPU type */
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
}
/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place before changing the processor and after
 * having entered switch_thread since switch_thread may not do a normal return
 * because the stack being used for anything the compiler saved will not belong
 * to the thread's destination core and it may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in the
 * nonvolatile registers if it is saved there before the call since the
 * compiler's order of operations cannot be known for certain.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    /* Stash thread in r4 slot */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}
680 /*---------------------------------------------------------------------------
681 * Machine-specific helper function for switching the processor a thread is
682 * running on. Basically, the thread suicides on the departing core and is
683 * reborn on the destination. Were it not for gcc's ill-behavior regarding
684 * naked functions written in C where it actually clobbers non-volatile
685 * registers before the intended prologue code, this would all be much
686 * simpler. Generic setup is done in switch_core itself.
689 /*---------------------------------------------------------------------------
690 * This actually performs the core switch.
692 static void __attribute__((naked
))
693 switch_thread_core(unsigned int core
, struct thread_entry
*thread
)
695 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
696 * Stack access also isn't permitted until restoring the original stack and
699 "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */
700 "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
701 "ldr r2, [r2, r0, lsl #2] \n"
702 "add r2, r2, %0*4 \n"
703 "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
704 "mov sp, r2 \n" /* switch stacks */
705 "adr r2, 1f \n" /* r2 = new core restart address */
706 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
707 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
709 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
710 "mov r1, #0 \n" /* Clear start address */
711 "str r1, [r0, #40] \n"
712 "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */
715 "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */
716 ".ltorg \n" /* Dump constant pool */
717 : : "i"(IDLE_STACK_WORDS
)
719 (void)core
; (void)thread
;
722 /*---------------------------------------------------------------------------
723 * Do any device-specific inits for the threads and synchronize the kernel
725 *---------------------------------------------------------------------------
727 static void core_thread_init(unsigned int core
)
731 /* Wake up coprocessor and let it initialize kernel and threads */
736 /* Sleep until COP has finished */
741 /* Wake the CPU and return */
745 #endif /* NUM_CORES */
747 #elif CONFIG_CPU == S3C2440
749 /*---------------------------------------------------------------------------
750 * Put core in a power-saving state if waking list wasn't repopulated.
751 *---------------------------------------------------------------------------
753 static inline void core_sleep(void)
755 /* FIQ also changes the CLKCON register so FIQ must be disabled
756 when changing it here */
759 "orr r2, r0, #0x40 \n" /* Disable FIQ */
760 "bic r0, r0, #0x80 \n" /* Prepare IRQ enable */
762 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
763 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
765 "str r2, [r1, #0xc] \n"
766 "msr cpsr_c, r0 \n" /* Enable IRQ, restore FIQ */
767 "mov r2, #0 \n" /* wait for IDLE */
772 "orr r2, r0, #0xc0 \n" /* Disable IRQ, FIQ */
774 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
776 "str r2, [r1, #0xc] \n"
777 "msr cpsr_c, r0 \n" /* Enable IRQ, restore FIQ */
778 : : : "r0", "r1", "r2");
780 #elif defined(CPU_TCC77X)
781 static inline void core_sleep(void)
783 #warning TODO: Implement core_sleep
786 #elif defined(CPU_TCC780X)
787 static inline void core_sleep(void)
789 /* Single core only for now. Use the generic ARMv5 wait for IRQ */
792 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
797 #elif CONFIG_CPU == IMX31L
798 static inline void core_sleep(void)
802 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
807 #elif CONFIG_CPU == DM320
808 static inline void core_sleep(void)
812 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
818 static inline void core_sleep(void)
820 #warning core_sleep not implemented, battery life will be decreased
823 #endif /* CONFIG_CPU == */
825 #elif defined(CPU_COLDFIRE)
826 /*---------------------------------------------------------------------------
827 * Start the thread running and terminate it if it returns
828 *---------------------------------------------------------------------------
830 void start_thread(void); /* Provide C access to ASM label */
831 static void __attribute__((used
)) __start_thread(void)
833 /* a0=macsr, a1=context */
835 "start_thread: \n" /* Start here - no naked attribute */
836 "move.l %a0, %macsr \n" /* Set initial mac status reg */
837 "lea.l 48(%a1), %a1 \n"
838 "move.l (%a1)+, %sp \n" /* Set initial stack */
839 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
840 "clr.l (%a1) \n" /* Mark thread running */
841 "jsr (%a2) \n" /* Call thread function */
846 /* Set EMAC unit to fractional mode with saturation for each new thread,
847 * since that's what'll be the most useful for most things which the dsp
848 * will do. Codecs should still initialize their preferred modes
849 * explicitly. Context pointer is placed in d2 slot and start_thread
850 * pointer in d3 slot. thread function pointer is placed in context.start.
851 * See load_context for what happens when thread is initially going to
854 #define THREAD_STARTUP_INIT(core, thread, function) \
855 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
856 (thread)->context.d[0] = (uint32_t)&(thread)->context, \
857 (thread)->context.d[1] = (uint32_t)start_thread, \
858 (thread)->context.start = (uint32_t)(function); })
860 /*---------------------------------------------------------------------------
861 * Store non-volatile context.
862 *---------------------------------------------------------------------------
864 static inline void store_context(void* addr
)
867 "move.l %%macsr,%%d0 \n"
868 "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
869 : : "a" (addr
) : "d0" /* only! */
873 /*---------------------------------------------------------------------------
874 * Load non-volatile context.
875 *---------------------------------------------------------------------------
877 static inline void load_context(const void* addr
)
880 "move.l 52(%0), %%d0 \n" /* Get start address */
881 "beq.b 1f \n" /* NULL -> already running */
882 "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
883 "jmp (%%a2) \n" /* Start the thread */
885 "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
886 "move.l %%d0, %%macsr \n"
887 : : "a" (addr
) : "d0" /* only! */
891 /*---------------------------------------------------------------------------
892 * Put core in a power-saving state if waking list wasn't repopulated.
893 *---------------------------------------------------------------------------
895 static inline void core_sleep(void)
897 /* Supervisor mode, interrupts enabled upon wakeup */
898 asm volatile ("stop #0x2000");
901 #elif CONFIG_CPU == SH7034
902 /*---------------------------------------------------------------------------
903 * Start the thread running and terminate it if it returns
904 *---------------------------------------------------------------------------
906 void start_thread(void); /* Provide C access to ASM label */
907 static void __attribute__((used
)) __start_thread(void)
911 "_start_thread: \n" /* Start here - no naked attribute */
912 "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
913 "mov.l @(28, r8), r15 \n" /* Set initial sp */
914 "mov #0, r1 \n" /* Start the thread */
916 "mov.l r1, @(36, r8) \n" /* Clear start address */
921 /* Place context pointer in r8 slot, function pointer in r9 slot, and
922 * start_thread pointer in context_start */
923 #define THREAD_STARTUP_INIT(core, thread, function) \
924 ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
925 (thread)->context.r[1] = (uint32_t)(function), \
926 (thread)->context.start = (uint32_t)start_thread; })
928 /*---------------------------------------------------------------------------
929 * Store non-volatile context.
930 *---------------------------------------------------------------------------
932 static inline void store_context(void* addr
)
935 "add #36, %0 \n" /* Start at last reg. By the time routine */
936 "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
949 /*---------------------------------------------------------------------------
950 * Load non-volatile context.
951 *---------------------------------------------------------------------------
953 static inline void load_context(const void* addr
)
956 "mov.l @(36, %0), r0 \n" /* Get start address */
958 "bt .running \n" /* NULL -> already running */
959 "jmp @r0 \n" /* r8 = context */
961 "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
970 : : "r" (addr
) : "r0" /* only! */
974 /*---------------------------------------------------------------------------
975 * Put core in a power-saving state.
976 *---------------------------------------------------------------------------
978 static inline void core_sleep(void)
981 "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
982 "mov #0, r1 \n" /* Enable interrupts */
983 "ldc r1, sr \n" /* Following instruction cannot be interrupted */
984 "sleep \n" /* Execute standby */
985 : : "z"(&SBYCR
-GBR
) : "r1");
988 #endif /* CONFIG_CPU == */
/****************************************************************************
 * End Processor-specific section
 ***************************************************************************/
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}

static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}

#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&thread->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else
#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })
#endif

/* RTR list */
#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })

#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))
#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))
#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *
 *     +------------------------------+
 *     |                              |
 *     +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *     +->+---+->+---+->+---+->+---+--+
 *     |                              |
 *     +------------------------------+
 *---------------------------------------------------------------------------
 */
/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links of a thread.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}

/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == next)
    {
        /* The only item on the list */
        *list = NULL;
        return;
    }

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    next->l.prev = prev;
    prev->l.next = next;
}
/*---------------------------------------------------------------------------
 * Timeout list structure - circular reverse (to make "remove item" O(1)),
 * NULL-terminated forward (to ease the far more common forward traversal):
 *
 *     +------------------------------+
 *     |                              |
 *     +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *        +---+->+---+->+---+->+---+-X
 *---------------------------------------------------------------------------
 */

/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;

    THREAD_ASSERT(thread->tmo.prev == NULL,
                  "add_to_list_tmo->already listed", thread);

    thread->tmo.next = NULL;

    if (tmo == NULL)
    {
        /* Insert into unoccupied list */
        thread->tmo.prev = thread;
        cores[IF_COP_CORE(thread->core)].timeout = thread;
        return;
    }

    /* Insert last */
    thread->tmo.prev = tmo->tmo.prev;
    tmo->tmo.prev->tmo.next = thread;
    tmo->tmo.prev = thread;
}

/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is no longer queued.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
    struct thread_entry *prev = thread->tmo.prev;
    struct thread_entry *next = thread->tmo.next;

    THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);

    if (next != NULL)
        next->tmo.prev = prev;

    if (thread == *list)
    {
        /* List becomes next item and empty if next == NULL */
        *list = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
    else
    {
        if (next == NULL)
            (*list)->tmo.prev = prev;
        prev->tmo.next = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
}
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Priority distribution structure (one category for each possible priority):
 *
 *       +----+----+----+ ... +-----+
 * hist: | F0 | F1 | F2 |     | F31 |
 *       +----+----+----+ ... +-----+
 * mask: | b0 | b1 | b2 |     | b31 |
 *       +----+----+----+ ... +-----+
 *
 * F = count of threads at priority category n (frequency)
 * b = bitmask of non-zero priority categories (occupancy)
 *
 *        / if H[n] != 0 : 1
 * b[n] = |
 *        \ otherwise    : 0
 *
 *---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
 *
 * Mn = mutex n, Tn = thread n
 *
 * A lower priority thread inherits the priority of the highest priority
 * thread blocked waiting for it to complete an action (such as release a
 * mutex or respond to a message via queue_send):
 *
 * 1) T2->M1->T1
 *
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1, then T1 inherits the priority of T2.
 *
 * 2) T2->M1->T1, T3->M1->T1
 *
 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
 * T1 inherits the higher of T2 and T3.
 *
 * 3) T3->M2->T2->M1->T1
 *
 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
 * then T1 inherits the priority of T3 through T2.
 *
 * Blocking chains can grow arbitrarily complex (though it's best that they
 * not form at all very often :) and build-up from these units.
 *---------------------------------------------------------------------------
 */
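/*
 * Illustrative sketch only (not in the original source): with threads at
 * priorities 5, 5 and 9, the distribution holds hist[5] == 2, hist[9] == 1
 * and mask == (1<<5)|(1<<9), so
 *
 *   find_first_set_bit(pd->mask)
 *
 * yields 5, the numerically lowest (i.e. highest) priority present.
 * prio_add_entry/prio_subtract_entry below keep hist and mask consistent as
 * threads enter and leave a category.
 */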
1259 /*---------------------------------------------------------------------------
1260 * Increment frequency at category "priority"
1261 *---------------------------------------------------------------------------
1263 static inline unsigned int prio_add_entry(
1264 struct priority_distribution
*pd
, int priority
)
1267 /* Enough size/instruction count difference for ARM makes it worth it to
1268 * use different code (192 bytes for ARM). Only thing better is ASM. */
1270 count
= pd
->hist
[priority
];
1272 pd
->mask
|= 1 << priority
;
1273 pd
->hist
[priority
] = count
;
1274 #else /* This one's better for Coldfire */
1275 if ((count
= ++pd
->hist
[priority
]) == 1)
1276 pd
->mask
|= 1 << priority
;
1282 /*---------------------------------------------------------------------------
1283 * Decrement frequency at category "priority"
1284 *---------------------------------------------------------------------------
1286 static inline unsigned int prio_subtract_entry(
1287 struct priority_distribution
*pd
, int priority
)
1292 count
= pd
->hist
[priority
];
1294 pd
->mask
&= ~(1 << priority
);
1295 pd
->hist
[priority
] = count
;
1297 if ((count
= --pd
->hist
[priority
]) == 0)
1298 pd
->mask
&= ~(1 << priority
);
1304 /*---------------------------------------------------------------------------
1305 * Remove from one category and add to another
1306 *---------------------------------------------------------------------------
1308 static inline void prio_move_entry(
1309 struct priority_distribution
*pd
, int from
, int to
)
1311 uint32_t mask
= pd
->mask
;
1316 count
= pd
->hist
[from
];
1318 mask
&= ~(1 << from
);
1319 pd
->hist
[from
] = count
;
1321 count
= pd
->hist
[to
];
1324 pd
->hist
[to
] = count
;
1326 if (--pd
->hist
[from
] == 0)
1327 mask
&= ~(1 << from
);
1329 if (++pd
->hist
[to
] == 1)
1336 /*---------------------------------------------------------------------------
1337 * Change the priority and rtr entry for a running thread
1338 *---------------------------------------------------------------------------
1340 static inline void set_running_thread_priority(
1341 struct thread_entry
*thread
, int priority
)
1343 const unsigned int core
= IF_COP_CORE(thread
->core
);
1345 rtr_move_entry(core
, thread
->priority
, priority
);
1346 thread
->priority
= priority
;
1350 /*---------------------------------------------------------------------------
1351 * Finds the highest priority thread in a list of threads. If the list is
1352 * empty, the PRIORITY_IDLE is returned.
1354 * It is possible to use the struct priority_distribution within an object
1355 * instead of scanning the remaining threads in the list but as a compromise,
1356 * the resulting per-object memory overhead is saved at a slight speed
1357 * penalty under high contention.
1358 *---------------------------------------------------------------------------
1360 static int find_highest_priority_in_list_l(
1361 struct thread_entry
* const thread
)
1365 /* Go though list until the ending up at the initial thread */
1366 int highest_priority
= thread
->priority
;
1367 struct thread_entry
*curr
= thread
;
1371 int priority
= curr
->priority
;
1373 if (priority
< highest_priority
)
1374 highest_priority
= priority
;
1376 curr
= curr
->l
.next
;
1378 while (curr
!= thread
);
1380 return highest_priority
;
1383 return PRIORITY_IDLE
;
/*---------------------------------------------------------------------------
 * Register priority with blocking system and bubble it down the chain if
 * any until we reach the end or something is already equal or higher.
 *
 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
 * targets but that same action also guarantees a circular block anyway and
 * those are prevented, right? :-)
 *---------------------------------------------------------------------------
 */
static struct thread_entry *
blocker_inherit_priority(struct thread_entry *current)
{
    const int priority = current->priority;
    struct blocker *bl = current->blocker;
    struct thread_entry * const tstart = current;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since the object protection is held */
    LOCK_THREAD(bl_t);
1408 struct thread_entry
*next
;
1409 int bl_pr
= bl
->priority
;
1411 if (priority
>= bl_pr
)
1412 break; /* Object priority already high enough */
1414 bl
->priority
= priority
;
1417 prio_add_entry(&bl_t
->pdist
, priority
);
1419 if (bl_pr
< PRIORITY_IDLE
)
1421 /* Not first waiter - subtract old one */
1422 prio_subtract_entry(&bl_t
->pdist
, bl_pr
);
1425 if (priority
>= bl_t
->priority
)
1426 break; /* Thread priority high enough */
1428 if (bl_t
->state
== STATE_RUNNING
)
1430 /* Blocking thread is a running thread therefore there are no
1431 * further blockers. Change the "run queue" on which it
1433 set_running_thread_priority(bl_t
, priority
);
1437 bl_t
->priority
= priority
;
1439 /* If blocking thread has a blocker, apply transitive inheritance */
1443 break; /* End of chain or object doesn't support inheritance */
1448 break; /* Full-circle - deadlock! */
1450 UNLOCK_THREAD(current
);
1457 /* Blocker could change - retest condition */
1458 if (bl
->thread
== next
)
1461 UNLOCK_THREAD(next
);
1469 UNLOCK_THREAD(bl_t
);
/*---------------------------------------------------------------------------
 * Readjust priorities when waking a thread blocked waiting for another,
 * in essence "releasing" the thread's effect on the object owner. Can be
 * performed from any context.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
wakeup_priority_protocol_release(struct thread_entry *thread)
{
    const int priority = thread->priority;
    struct blocker *bl = thread->blocker;
    struct thread_entry * const tstart = thread;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since object will be locked */
    LOCK_THREAD(bl_t);

    thread->blocker = NULL; /* Thread not blocked */
1495 struct thread_entry
*next
;
1496 int bl_pr
= bl
->priority
;
1498 if (priority
> bl_pr
)
1499 break; /* Object priority higher */
1501 next
= *thread
->bqp
;
1505 /* No more threads in queue */
1506 prio_subtract_entry(&bl_t
->pdist
, bl_pr
);
1507 bl
->priority
= PRIORITY_IDLE
;
1511 /* Check list for highest remaining priority */
1512 int queue_pr
= find_highest_priority_in_list_l(next
);
1514 if (queue_pr
== bl_pr
)
1515 break; /* Object priority not changing */
1517 /* Change queue priority */
1518 prio_move_entry(&bl_t
->pdist
, bl_pr
, queue_pr
);
1519 bl
->priority
= queue_pr
;
1522 if (bl_pr
> bl_t
->priority
)
1523 break; /* thread priority is higher */
1525 bl_pr
= find_first_set_bit(bl_t
->pdist
.mask
);
1527 if (bl_pr
== bl_t
->priority
)
1528 break; /* Thread priority not changing */
1530 if (bl_t
->state
== STATE_RUNNING
)
1532 /* No further blockers */
1533 set_running_thread_priority(bl_t
, bl_pr
);
1537 bl_t
->priority
= bl_pr
;
1539 /* If blocking thread has a blocker, apply transitive inheritance */
1543 break; /* End of chain or object doesn't support inheritance */
1548 break; /* Full-circle - deadlock! */
1550 UNLOCK_THREAD(thread
);
1557 /* Blocker could change - retest condition */
1558 if (bl
->thread
== next
)
1561 UNLOCK_THREAD(next
);
1569 UNLOCK_THREAD(bl_t
);
1572 if (thread
!= tstart
)
1574 /* Relock original if it changed */
1575 LOCK_THREAD(tstart
);
1579 return cores
[CURRENT_CORE
].running
;
/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
 * blocking chains may only unblock from the very end.
 *
 * Only the owning thread itself may call this and so the assumption that
 * it is the running thread is made.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner */
    struct blocker *bl = thread->blocker;
    struct thread_entry *bl_t = bl->thread;
    struct thread_entry *next;
    int bl_pr;

    THREAD_ASSERT(thread_get_current() == bl_t,
                  "UPPT->wrong thread", thread_get_current());
1605 bl_pr
= bl
->priority
;
1607 /* Remove the object's boost from the owning thread */
1608 if (prio_subtract_entry(&bl_t
->pdist
, bl_pr
) == 0 &&
1609 bl_pr
<= bl_t
->priority
)
1611 /* No more threads at this priority are waiting and the old level is
1612 * at least the thread level */
1613 int priority
= find_first_set_bit(bl_t
->pdist
.mask
);
1615 if (priority
!= bl_t
->priority
)
1617 /* Adjust this thread's priority */
1618 set_running_thread_priority(bl_t
, priority
);
1622 next
= *thread
->bqp
;
1626 /* Expected shortcut - no more waiters */
1627 bl_pr
= PRIORITY_IDLE
;
1631 if (thread
->priority
<= bl_pr
)
1633 /* Need to scan threads remaining in queue */
1634 bl_pr
= find_highest_priority_in_list_l(next
);
1637 if (prio_add_entry(&thread
->pdist
, bl_pr
) == 1 &&
1638 bl_pr
< thread
->priority
)
1640 /* Thread priority must be raised */
1641 thread
->priority
= bl_pr
;
1645 bl
->thread
= thread
; /* This thread pwns */
1646 bl
->priority
= bl_pr
; /* Save highest blocked priority */
1647 thread
->blocker
= NULL
; /* Thread not blocked */
1649 UNLOCK_THREAD(bl_t
);
/*---------------------------------------------------------------------------
 * No threads must be blocked waiting for this thread except for it to exit.
 * The alternative is more elaborate cleanup and object registration code.
 * Check this for risk of silent data corruption when objects with
 * inheritable blocking are abandoned by the owner - not precise but may
 * catch something.
 *---------------------------------------------------------------------------
 */
static void check_for_obj_waiters(const char *function, struct thread_entry *thread)
{
    /* Only one bit in the mask should be set with a frequency of 1 which
     * represents the thread's own base priority */
    uint32_t mask = thread->pdist.mask;
    if ((mask & (mask - 1)) != 0 ||
        thread->pdist.hist[find_first_set_bit(mask)] > 1)
    {
        unsigned char name[32];
        thread_get_name(name, 32, thread);
        panicf("%s->%s with obj. waiters", function, name);
    }
}
#endif /* HAVE_PRIORITY_SCHEDULING */

/*---------------------------------------------------------------------------
 * Move a thread back to a running state on its core.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);

    RTR_LOCK(core);

    thread->state = STATE_RUNNING;

    add_to_list_l(&cores[core].running, thread);
    rtr_add_entry(core, thread->priority);

    RTR_UNLOCK(core);

#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}
/*---------------------------------------------------------------------------
 * Check the core's timeout list when at least one thread is due to wake.
 * Filtering for the condition is done before making the call. Resets the
 * tick when the next check will occur.
 *---------------------------------------------------------------------------
 */
void check_tmo_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *next = cores[core].timeout;

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */

    /* Break the loop once we have walked through the list of all
     * sleeping processes or have removed them all. */
    while (next != NULL)
    {
1720 /* Check sleeping threads. Allow interrupts between checks. */
1723 struct thread_entry
*curr
= next
;
1725 next
= curr
->tmo
.next
;
1727 /* Lock thread slot against explicit wakeup */
1731 unsigned state
= curr
->state
;
1733 if (state
< TIMEOUT_STATE_FIRST
)
1735 /* Cleanup threads no longer on a timeout but still on the
1737 remove_from_list_tmo(curr
);
1739 else if (TIME_BEFORE(tick
, curr
->tmo_tick
))
1741 /* Timeout still pending - this will be the usual case */
1742 if (TIME_BEFORE(curr
->tmo_tick
, next_tmo_check
))
1744 /* Earliest timeout found so far - move the next check up
1746 next_tmo_check
= curr
->tmo_tick
;
1751 /* Sleep timeout has been reached so bring the thread back to
1753 if (state
== STATE_BLOCKED_W_TMO
)
1756 /* Lock the waiting thread's kernel object */
1757 struct corelock
*ocl
= curr
->obj_cl
;
1759 if (corelock_try_lock(ocl
) == 0)
1761 /* Need to retry in the correct order though the need is
1763 UNLOCK_THREAD(curr
);
1767 if (curr
->state
!= STATE_BLOCKED_W_TMO
)
/* Thread was woken or removed explicitly while the slot was locked */
1771 corelock_unlock(ocl
);
1772 remove_from_list_tmo(curr
);
1773 UNLOCK_THREAD(curr
);
1777 #endif /* NUM_CORES */
1779 remove_from_list_l(curr
->bqp
, curr
);
1781 #ifdef HAVE_WAKEUP_EXT_CB
1782 if (curr
->wakeup_ext_cb
!= NULL
)
1783 curr
->wakeup_ext_cb(curr
);
1786 #ifdef HAVE_PRIORITY_SCHEDULING
1787 if (curr
->blocker
!= NULL
)
1788 wakeup_priority_protocol_release(curr
);
1790 corelock_unlock(ocl
);
1792 /* else state == STATE_SLEEPING */
1794 remove_from_list_tmo(curr
);
1798 curr
->state
= STATE_RUNNING
;
1800 add_to_list_l(&cores
[core
].running
, curr
);
1801 rtr_add_entry(core
, curr
->priority
);
1806 UNLOCK_THREAD(curr
);
1809 cores
[core
].next_tmo_check
= next_tmo_check
;
/*---------------------------------------------------------------------------
 * Performs operations that must be done before blocking a thread but after
 * the state is saved.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
{
    struct thread_blk_ops *ops = &cores[core].blk_ops;
    const unsigned flags = ops->flags;

    if (flags == TBOP_CLEAR)
        return;

    switch (flags)
    {
    case TBOP_SWITCH_CORE:
        core_switch_blk_op(core, thread);
        /* Fall-through */
    case TBOP_UNLOCK_CORELOCK:
        corelock_unlock(ops->cl_p);
        break;
    }

    ops->flags = TBOP_CLEAR;
}
#endif /* NUM_CORES > 1 */
void profile_thread(void)
{
    profstart(cores[CURRENT_CORE].running - threads);
}
/*---------------------------------------------------------------------------
 * Prepares a thread to block on an object's list and/or for a specified
 * duration - expects object and slot to be appropriately locked if needed
 * and interrupts to be masked.
 *---------------------------------------------------------------------------
 */
static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
{
    /* If inlined, unreachable branches will be pruned with no size penalty
       because state is passed as a constant parameter. */
    const unsigned int core = IF_COP_CORE(thread->core);

    /* Remove the thread from the list of running threads. */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, thread);
    rtr_subtract_entry(core, thread->priority);
    RTR_UNLOCK(core);

    /* Add a timeout to the block if not infinite */
    switch (state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Put the thread into a new list of inactive threads. */
        add_to_list_l(thread->bqp, thread);

        if (state == STATE_BLOCKED)
            break;

        /* Fall-through */
    case STATE_SLEEPING:
        /* If this thread times out sooner than any other thread, update
           next_tmo_check to its timeout */
        if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
        {
            cores[core].next_tmo_check = thread->tmo_tick;
        }

        if (thread->tmo.prev == NULL)
        {
            add_to_list_tmo(thread);
        }
        /* else thread was never removed from list - just keep it there */
        break;
    }

    /* Remember the next thread about to block. */
    cores[core].block_task = thread;

    /* Report new state. */
    thread->state = state;
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion for any given priority. Any thread
 * that removed itself from the running list first must specify itself in
 * the parameter.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void switch_thread(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *block = cores[core].block_task;
    struct thread_entry *thread = cores[core].running;
1916 /* Get context to save - next thread to run is unknown until all wakeups
1920 cores
[core
].block_task
= NULL
;
1923 if (thread
== block
)
1925 /* This was the last thread running and another core woke us before
1926 * reaching here. Force next thread selection to give tmo threads or
1927 * other threads woken before this block a first chance. */
1933 /* Blocking task is the old one */
1939 profile_thread_stopped(thread
- threads
);
1942 /* Begin task switching by saving our current context so that we can
1943 * restore the state of the current thread later to the point prior
1945 store_context(&thread
->context
);
1947 /* Check if the current thread stack is overflown */
1948 if (thread
->stack
[0] != DEADBEEF
)
1949 thread_stkov(thread
);
1952 /* Run any blocking operations requested before switching/sleeping */
1953 run_blocking_ops(core
, thread
);
1956 #ifdef HAVE_PRIORITY_SCHEDULING
1957 IF_NO_SKIP_YIELD( if (thread
->skip_count
!= -1) )
1958 /* Reset the value of thread's skip count */
1959 thread
->skip_count
= 0;
1964 /* If there are threads on a timeout and the earliest wakeup is due,
1965 * check the list and wake any threads that need to start running
1967 if (!TIME_BEFORE(current_tick
, cores
[core
].next_tmo_check
))
1969 check_tmo_threads();
1975 thread
= cores
[core
].running
;
1979 /* Enter sleep mode to reduce power usage - woken up on interrupt
1980 * or wakeup request from another core - expected to enable
1983 core_sleep(IF_COP(core
));
1987 #ifdef HAVE_PRIORITY_SCHEDULING
1988 /* Select the new task based on priorities and the last time a
1989 * process got CPU time relative to the highest priority runnable
1991 struct priority_distribution
*pd
= &cores
[core
].rtr
;
1992 int max
= find_first_set_bit(pd
->mask
);
1996 /* Not switching on a block, tentatively select next thread */
1997 thread
= thread
->l
.next
;
2002 int priority
= thread
->priority
;
2005 /* This ridiculously simple method of aging seems to work
2006 * suspiciously well. It does tend to reward CPU hogs (under
2007 * yielding) but that's generally not desirable at all. On the
2008 * plus side, it, relatively to other threads, penalizes excess
2009 * yielding which is good if some high priority thread is
2010 * performing no useful work such as polling for a device to be
2011 * ready. Of course, aging is only employed when higher and lower
2012 * priority threads are runnable. The highest priority runnable
2013 * thread(s) are never skipped. */
2014 if (priority
<= max
||
2015 IF_NO_SKIP_YIELD( thread
->skip_count
== -1 || )
2016 (diff
= priority
- max
, ++thread
->skip_count
> diff
*diff
))
2018 cores
[core
].running
= thread
;
2022 thread
= thread
->l
.next
;
2025 /* Without priority use a simple FCFS algorithm */
2028 /* Not switching on a block, select next thread */
2029 thread
= thread
->l
.next
;
2030 cores
[core
].running
= thread
;
2032 #endif /* HAVE_PRIORITY_SCHEDULING */
2040 /* And finally give control to the next thread. */
2041 load_context(&thread
->context
);
2044 profile_thread_started(thread
- threads
);
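/* Worked example of the aging rule used in switch_thread() above (numbers
 * are illustrative only): if the best runnable priority is max = 5 and a
 * runnable thread sits at priority 8, then diff = 8 - 5 = 3 and the thread
 * is passed over until ++skip_count exceeds diff*diff = 9, i.e. it gets
 * selected on roughly its tenth consideration. A thread at the highest
 * runnable priority (priority <= max) is never skipped. */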
/*---------------------------------------------------------------------------
 * Sleeps a thread for at least a specified number of ticks with zero being
 * a wait until the next tick.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void sleep_thread(int ticks)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    LOCK_THREAD(current);

    /* Set our timeout, remove from run list and join timeout list. */
    current->tmo_tick = current_tick + ticks + 1;
    block_thread_on_l(current, STATE_SLEEPING);

    UNLOCK_THREAD(current);
}
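/* Illustrative sketch only (not part of this file's API): a kernel-level
 * sleep wrapper would typically mask interrupts, queue the timeout with
 * sleep_thread() and then yield with switch_thread(). The wrapper name is
 * hypothetical. */
#if 0
void example_sleep(int ticks)
{
    disable_irq();
    sleep_thread(ticks);
    switch_thread();
}
#endif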
/*---------------------------------------------------------------------------
 * Indefinitely block a thread on a blocking queue for explicit wakeup.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread(struct thread_entry *current)
{
    /* Set the state to blocked and take us off of the run queue until we
     * are explicitly woken */
    LOCK_THREAD(current);

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}
/*---------------------------------------------------------------------------
 * Block a thread on a blocking queue for a specified time interval or until
 * explicitly woken - whichever happens first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread_w_tmo(struct thread_entry *current, int timeout)
{
    /* Get the entry for the current running thread. */
    LOCK_THREAD(current);

    /* Set the state to blocked with the specified timeout */
    current->tmo_tick = current_tick + timeout;

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED_W_TMO);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}
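/* Illustrative sketch only: how a kernel object implemented elsewhere might
 * drive block_thread()/block_thread_w_tmo(). The object type, its fields and
 * the function names are hypothetical; only the thread calls are real. */
#if 0
struct example_object
{
    struct thread_entry *queue;   /* list of waiting threads */
    IF_COP( struct corelock cl; ) /* object corelock on multicore */
};

static void example_wait(struct example_object *obj, int ticks)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    disable_irq();
    IF_COP( corelock_lock(&obj->cl); )

    IF_COP( current->obj_cl = &obj->cl; )
    current->bqp = &obj->queue;

    if (ticks < 0)
        block_thread(current);              /* wait for explicit wakeup */
    else
        block_thread_w_tmo(current, ticks); /* or until the timeout */

    IF_COP( corelock_unlock(&obj->cl); )
    switch_thread();
}
#endif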
/*---------------------------------------------------------------------------
 * Explicitly wakeup a thread on a blocking queue. Only affects threads of
 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
 *
 * This code should be considered a critical section by the caller meaning
 * that the object's corelock should be held.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread = *list;
    unsigned int result = THREAD_NONE;

    /* Check if there is a blocked thread at all. */
    if (thread == NULL)
        return result;

    LOCK_THREAD(thread);

    /* Determine thread's current state. */
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        remove_from_list_l(list, thread);

        result = THREAD_OK;

#ifdef HAVE_PRIORITY_SCHEDULING
        struct thread_entry *current;
        struct blocker *bl = thread->blocker;

        if (bl == NULL)
        {
            /* No inheritance - just boost the thread by aging */
            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
                thread->skip_count = thread->priority;
            current = cores[CURRENT_CORE].running;
        }
        else
        {
            /* Call the specified unblocking PIP */
            current = bl->wakeup_protocol(thread);
        }

        if (current != NULL && thread->priority < current->priority
            IF_COP( && thread->core == current->core ))
        {
            /* Woken thread is higher priority and exists on the same CPU core;
             * recommend a task switch. Knowing if this is an interrupt call
             * would be helpful here. */
            result |= THREAD_SWITCH;
        }
#endif /* HAVE_PRIORITY_SCHEDULING */

        core_schedule_wakeup(thread);
        break;

    /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
    default:
        THREAD_PANICF("wakeup_thread->block invalid", thread);
    case STATE_RUNNING:
    case STATE_KILLED:
        break;
#endif
    }

    UNLOCK_THREAD(thread);

    return result;
}
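/* Illustrative sketch only: the waker's side of the hypothetical object
 * above, showing how the returned bitmask is meant to be consumed. */
#if 0
static void example_signal(struct example_object *obj)
{
    int oldlevel = disable_irq_save();
    IF_COP( corelock_lock(&obj->cl); )

    unsigned int result = wakeup_thread(&obj->queue);

    IF_COP( corelock_unlock(&obj->cl); )
    restore_irq(oldlevel);

    if (result & THREAD_SWITCH)
        switch_thread(); /* a higher priority thread was woken - yield */
}
#endif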
/*---------------------------------------------------------------------------
 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
 * from each operation or THREAD_NONE if nothing was awakened. Object owning
 * the queue must be locked first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break; /* No more threads */

        result |= rc;
    }

    return result;
}
/*---------------------------------------------------------------------------
 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
 * will be locked on multicore.
 *---------------------------------------------------------------------------
 */
static struct thread_entry * find_empty_thread_slot(void)
{
    /* Any slot could be on an interrupt-accessible list */
    IF_COP( int oldlevel = disable_irq_save(); )
    struct thread_entry *thread = NULL;
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        /* Obtain current slot state - lock it on multicore */
        struct thread_entry *t = &threads[n];
        LOCK_THREAD(t);

        if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
        {
            /* Slot is empty - leave it locked and caller will unlock */
            thread = t;
            break;
        }

        /* Finished examining slot - no longer busy - unlock on multicore */
        UNLOCK_THREAD(t);
    }

    IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
                                        not accessible to them yet */
    return thread;
}
/*---------------------------------------------------------------------------
 * Place the current core in idle mode - woken up on interrupt or wake
 * request from another core.
 *---------------------------------------------------------------------------
 */
void core_idle(void)
{
    IF_COP( const unsigned int core = CURRENT_CORE; )
    disable_irq();
    core_sleep(IF_COP(core));
}
/*---------------------------------------------------------------------------
 * Create a thread. If using a dual core architecture, specify which core to
 * start the thread on.
 *
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, size_t stack_size,
                  unsigned flags, const char *name
                  IF_PRIO(, int priority)
                  IF_COP(, unsigned int core))
{
    unsigned int i;
    unsigned int stack_words;
    uintptr_t stackptr, stackend;
    struct thread_entry *thread;
    unsigned state;
    int oldlevel;

    thread = find_empty_thread_slot();
    if (thread == NULL)
    {
        return NULL;
    }

    oldlevel = disable_irq_save();

    /* Munge the stack to make it easy to spot stack overflows */
    stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
    stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
    stack_size = stackend - stackptr;
    stack_words = stack_size / sizeof (uintptr_t);

    for (i = 0; i < stack_words; i++)
    {
        ((uintptr_t *)stackptr)[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread->name = name;
    thread->stack = (uintptr_t *)stackptr;
    thread->stack_size = stack_size;
    thread->queue = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->cpu_boost = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    memset(&thread->pdist, 0, sizeof(thread->pdist));
    thread->blocker = NULL;
    thread->base_priority = priority;
    thread->priority = priority;
    thread->skip_count = priority;
    prio_add_entry(&thread->pdist, priority);
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        flush_icache();
    }
#endif

    /* Thread is not on any timeout list but be a bit paranoid */
    thread->tmo.prev = NULL;

    state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;

    thread->context.sp = (typeof (thread->context.sp))stackend;

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    thread->state = state;

    if (state == STATE_RUNNING)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return thread;
}
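/* Illustrative usage sketch only - the stack size, thread name and priority
 * are made-up values, not requirements: */
#if 0
static long example_stack[DEFAULT_STACK_SIZE/sizeof(long)];

static void example_thread(void)
{
    /* ... thread body ... */
}

static void example_start(void)
{
    struct thread_entry *id =
        create_thread(example_thread, example_stack, sizeof(example_stack),
                      0, "example"
                      IF_PRIO(, PRIORITY_BACKGROUND)
                      IF_COP(, CPU));
    (void)id;
}
#endif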
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/*---------------------------------------------------------------------------
 * Change the boost state of a thread boosting or unboosting the CPU
 * as required.
 *---------------------------------------------------------------------------
 */
static inline void boost_thread(struct thread_entry *thread, bool boost)
{
    if ((thread->cpu_boost != 0) != boost)
    {
        thread->cpu_boost = boost;
        cpu_boost(boost);
    }
}

void trigger_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, true);
}

void cancel_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, false);
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */
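/* Illustrative sketch only: a thread brackets a burst of heavy work with the
 * boost calls; the work function is hypothetical. */
#if 0
static void example_heavy_work(void)
{
    trigger_cpu_boost();        /* request full CPU clock while busy */
    do_expensive_processing();  /* hypothetical workload */
    cancel_cpu_boost();         /* drop the request when done */
}
#endif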
/*---------------------------------------------------------------------------
 * Block the current thread until another thread terminates. A thread may
 * wait on itself to terminate which prevents it from running again and it
 * will need to be killed externally.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void thread_wait(struct thread_entry *thread)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    if (thread == NULL)
        thread = current;

    /* Lock thread-as-waitable-object lock */
    corelock_lock(&thread->waiter_cl);

    /* Be sure it hasn't been killed yet */
    if (thread->state != STATE_KILLED)
    {
        IF_COP( current->obj_cl = &thread->waiter_cl; )
        current->bqp = &thread->queue;

        disable_irq();
        block_thread(current);

        corelock_unlock(&thread->waiter_cl);

        switch_thread();
        return;
    }

    corelock_unlock(&thread->waiter_cl);
}
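/* Illustrative sketch only: joining a worker thread created elsewhere;
 * 'worker' is assumed to be the handle returned by create_thread(). */
#if 0
static void example_join(struct thread_entry *worker)
{
    thread_wait(worker); /* returns once the worker has called thread_exit() */
}
#endif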
/*---------------------------------------------------------------------------
 * Exit the current thread. The Right Way to Do Things (TM).
 *---------------------------------------------------------------------------
 */
void thread_exit(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    /* Cancel CPU boost if any */
    cancel_cpu_boost();

    disable_irq();

    corelock_lock(&current->waiter_cl);
    LOCK_THREAD(current);

#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(current);
        corelock_unlock(&current->waiter_cl);
        thread_wait(current);
        THREAD_PANICF("thread_exit->WK:*R", current);
    }
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("thread_exit", current);
#endif

    if (current->tmo.prev != NULL)
    {
        /* Cancel pending timeout list removal */
        remove_from_list_tmo(current);
    }

    /* Switch tasks and never return */
    block_thread_on_l(current, STATE_KILLED);

#if NUM_CORES > 1
    /* Switch to the idle stack if not on the main core (where "main"
     * runs) - we can hope gcc doesn't need the old stack beyond this
     * point. */
    if (core != CPU)
    {
        switch_to_idle_stack(core);
    }

    flush_icache();
#endif
    current->name = NULL;

    /* Signal this thread */
    thread_queue_wake(&current->queue);
    corelock_unlock(&current->waiter_cl);
    /* Slot must be unusable until thread is really gone */
    UNLOCK_THREAD_AT_TASK_SWITCH(current);
    switch_thread();
    /* This should never and must never be reached - if it is, the
     * state is corrupted */
    THREAD_PANICF("thread_exit->K:*R", current);
}
#ifdef ALLOW_REMOVE_THREAD
/*---------------------------------------------------------------------------
 * Remove a thread from the scheduler. Not The Right Way to Do Things in
 * most cases.
 *
 * Parameter is the ID as returned from create_thread().
 *
 * Use with care on threads that are not under careful control as this may
 * leave various objects in an undefined state.
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
#if NUM_CORES > 1
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
    struct corelock *ocl = NULL;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    struct thread_entry *current = cores[core].running;

    unsigned state;
    int oldlevel;

    if (thread == NULL)
        thread = current;

    if (thread == current)
        thread_exit(); /* Current thread - do normal exit */

    oldlevel = disable_irq_save();

    corelock_lock(&thread->waiter_cl);
    LOCK_THREAD(thread);

    state = thread->state;

    if (state == STATE_KILLED)
    {
        goto thread_killed;
    }

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(thread);
        corelock_unlock(&thread->waiter_cl);
        restore_irq(oldlevel);
        thread_wait(thread);
        return;
    }

    thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("remove_thread", thread);
#endif

    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur which means other
         * threads have to be guided into becoming thread waiters if they
         * attempt to remove it. */
        unsigned int new_core = thread->core;

        corelock_unlock(&thread->waiter_cl);

        UNLOCK_THREAD(thread);
        restore_irq(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = disable_irq_save();

        corelock_lock(&thread->waiter_cl);
        LOCK_THREAD(thread);

        state = thread->state;
        core = new_core;
        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
         * run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Cancel CPU boost if any */
    boost_thread(thread, false);
#endif

IF_COP( retry_state: )

    switch (state)
    {
    case STATE_RUNNING:
        RTR_LOCK(core);
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        rtr_subtract_entry(core, thread->priority);
        RTR_UNLOCK(core);
        break;
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#if NUM_CORES > 1
        if (&thread->waiter_cl != thread->obj_cl)
        {
            ocl = thread->obj_cl;

            if (corelock_try_lock(ocl) == 0)
            {
                UNLOCK_THREAD(thread);
                corelock_lock(ocl);
                LOCK_THREAD(thread);

                if (thread->state != state)
                {
                    /* Something woke the thread */
                    state = thread->state;
                    corelock_unlock(ocl);
                    goto retry_state;
                }
            }
        }
#endif /* NUM_CORES > 1 */
        remove_from_list_l(thread->bqp, thread);

#ifdef HAVE_WAKEUP_EXT_CB
        if (thread->wakeup_ext_cb != NULL)
            thread->wakeup_ext_cb(thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
        if (thread->blocker != NULL)
        {
            /* Remove thread's priority influence from its chain */
            wakeup_priority_protocol_release(thread);
        }
#endif

#if NUM_CORES > 1
        if (ocl != NULL)
            corelock_unlock(ocl);
#endif
        break;
    /* Otherwise thread is frozen and hasn't run yet */
    }

    thread->state = STATE_KILLED;

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake(&thread->queue);

    thread->name = NULL;

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock and reenable interrupts */
    corelock_unlock(&thread->waiter_cl);
    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

#if NUM_CORES > 1
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           the native core */
        switch_core(old_core);
    }
#endif
}
#endif /* ALLOW_REMOVE_THREAD */
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative base priority for the core it runs on. Any
 * needed inheritance changes also may happen.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_base_priority = -1;

    /* A little safety measure */
    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
        return -1;

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* Make sure it's not killed */
    if (thread->state != STATE_KILLED)
    {
        int old_priority = thread->priority;

        old_base_priority = thread->base_priority;
        thread->base_priority = priority;

        prio_move_entry(&thread->pdist, old_base_priority, priority);
        priority = find_first_set_bit(thread->pdist.mask);

        if (old_priority == priority)
        {
            /* No priority change - do nothing */
        }
        else if (thread->state == STATE_RUNNING)
        {
            /* This thread is running - change location on the run
             * queue. No transitive inheritance needed. */
            set_running_thread_priority(thread, priority);
        }
        else
        {
            thread->priority = priority;

            if (thread->blocker != NULL)
            {
                /* Bubble new priority down the chain */
                struct blocker *bl = thread->blocker;   /* Blocker struct */
                struct thread_entry *bl_t = bl->thread; /* Blocking thread */
                struct thread_entry * const tstart = thread; /* Initial thread */
                const int highest = MIN(priority, old_priority); /* Higher of new or old */

                for (;;)
                {
                    struct thread_entry *next; /* Next thread to check */
                    int bl_pr;    /* Highest blocked thread */
                    int queue_pr; /* New highest blocked thread */
#if NUM_CORES > 1
                    /* Owner can change but thread cannot be dislodged - thread
                     * may not be the first in the queue which allows other
                     * threads ahead in the list to be given ownership during the
                     * operation. If thread is next then the waker will have to
                     * wait for us and the owner of the object will remain fixed.
                     * If we successfully grab the owner -- which at some point
                     * is guaranteed -- then the queue remains fixed until we
                     * pass by. */
                    for (;;)
                    {
                        LOCK_THREAD(bl_t);

                        /* Double-check the owner - retry if it changed */
                        if (bl->thread == bl_t)
                            break;

                        UNLOCK_THREAD(bl_t);
                        bl_t = bl->thread;
                    }
#endif
                    bl_pr = bl->priority;

                    if (highest > bl_pr)
                        break; /* Object priority won't change */

                    /* This will include the thread being set */
                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);

                    if (queue_pr == bl_pr)
                        break; /* Object priority not changing */

                    /* Update thread boost for this object */
                    bl->priority = queue_pr;
                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
                    bl_pr = find_first_set_bit(bl_t->pdist.mask);

                    if (bl_t->priority == bl_pr)
                        break; /* Blocking thread priority not changing */

                    if (bl_t->state == STATE_RUNNING)
                    {
                        /* Thread not blocked - we're done */
                        set_running_thread_priority(bl_t, bl_pr);
                        break;
                    }

                    bl_t->priority = bl_pr;
                    bl = bl_t->blocker; /* Blocking thread has a blocker? */

                    if (bl == NULL)
                        break; /* End of chain */

                    next = bl->thread;

                    if (next == tstart)
                        break; /* Full-circle */

                    UNLOCK_THREAD(thread);

                    thread = bl_t;
                    bl_t = next;
                }

                UNLOCK_THREAD(bl_t);
            }
        }
    }

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return old_base_priority;
}
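/* Illustrative sketch only: temporarily raising a thread's base priority for
 * a time-critical section and restoring it from the returned old value. */
#if 0
static void example_priority_bump(struct thread_entry *id)
{
    int old = thread_set_priority(id, PRIORITY_REALTIME);

    /* ... time-critical work ... */

    if (old >= 0)
        thread_set_priority(id, old);
}
#endif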
/*---------------------------------------------------------------------------
 * Returns the current base priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(struct thread_entry *thread)
{
    /* Simple, quick probe. */
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
 * Starts a frozen thread - similar semantics to wakeup_thread except that
 * the thread is on no scheduler or wakeup queue at all. It exists simply by
 * virtue of the slot having a state of STATE_FROZEN.
 *---------------------------------------------------------------------------
 */
void thread_thaw(struct thread_entry *thread)
{
    int oldlevel = disable_irq_save();
    LOCK_THREAD(thread);

    if (thread->state == STATE_FROZEN)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);
}
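/* Illustrative sketch only: a thread created with CREATE_THREAD_FROZEN stays
 * in STATE_FROZEN until thawed, which lets the creator finish shared setup
 * first. The thread body, stack and priority names are hypothetical. */
#if 0
static void example_start_frozen(void)
{
    struct thread_entry *id =
        create_thread(example_thread, example_stack, sizeof(example_stack),
                      CREATE_THREAD_FROZEN, "frozen example"
                      IF_PRIO(, PRIORITY_BACKGROUND)
                      IF_COP(, CPU));

    /* ... initialize state the new thread depends on ... */

    thread_thaw(id); /* now allow it to run */
}
#endif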
/*---------------------------------------------------------------------------
 * Return the ID of the currently executing thread.
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if (core == new_core)
    {
        /* No change - just return same core */
        return core;
    }

    int oldlevel = disable_irq_save();
    LOCK_THREAD(current);

    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - deactivate and let process complete */
        UNLOCK_THREAD(current);
        restore_irq(oldlevel);
        thread_wait(current);
        /* Should never be reached */
        THREAD_PANICF("switch_core->D:*R", current);
    }

    /* Get us off the running list for the current core */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, current);
    rtr_subtract_entry(core, current->priority);
    RTR_UNLOCK(core);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned-up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;

    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the list unlock to keep the other core stuck
     * until this thread is ready. */
    RTR_LOCK(new_core);

    rtr_add_entry(new_core, current->priority);
    add_to_list_l(&cores[new_core].running, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
    cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
    cores[core].block_task = current;

    UNLOCK_THREAD(current);

    /* Alert other core to activity */
    core_wake(new_core);

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

    /* Finally return the old core to caller */
    return current->retval;
}
#endif /* NUM_CORES > 1 */
/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;

    /* CPU will initialize first and then sleep */
    thread = find_empty_thread_slot();

    if (thread == NULL)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zero'ed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }

    /* Initialize initially non-zero members of core */
    cores[core].next_tmo_check = current_tick; /* Something not in the past */

    /* Initialize initially non-zero members of slot */
    UNLOCK_THREAD(thread); /* No sync worries yet */
    thread->name = main_thread_name;
    thread->state = STATE_RUNNING;
    IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
    corelock_init(&cores[core].rtr_cl);
    thread->base_priority = PRIORITY_USER_INTERFACE;
    prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
    thread->priority = PRIORITY_USER_INTERFACE;
    rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif
    corelock_init(&thread->waiter_cl);
    corelock_init(&thread->slot_cl);

    add_to_list_l(&cores[core].running, thread);

    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
        /* Wait for other processors to finish their inits since create_thread
         * isn't safe to call until the kernel inits are done. The first
         * threads created in the system must of course be created by CPU. */
        core_thread_init(CPU);
    }
    else
    {
        /* Initial stack is the idle stack */
        thread->stack = idle_stacks[core];
        thread->stack_size = IDLE_STACK_SIZE;
        /* After last processor completes, it should signal all others to
         * proceed or may signal the next and call thread_exit(). The last one
         * to finish will signal CPU. */
        core_thread_init(core);
        /* Other cores do not have a main thread - go idle inside switch_thread
         * until a thread can run on the core. */
        thread_exit();
#endif /* NUM_CORES */
    }
}
/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES > 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    unsigned int i;
    int usage = 0;

    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
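/* Worked example of the calculation above (illustrative numbers): with a
 * 1024-word stack whose lowest 256 words still hold DEADBEEF, the scan stops
 * at i = 256 and reports ((1024 - 256) * 100) / 1024 = 75 percent used. */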
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 * overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    return stack_usage(thread->stack, thread->stack_size);
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif /* NUM_CORES > 1 */
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";

        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX";
        }

        snprintf(buffer, size, fmt, name);
    }
}
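/* Illustrative usage sketch only: */
#if 0
static void example_print_current(void)
{
    char buf[32];
    thread_get_name(buf, sizeof(buf), thread_get_current());
    DEBUGF("running: %s\n", buf);
}
#endif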