/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif
/*
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever waiting for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. If a thread blocks on an object it must fill in the blk_ops members
 * for its core to unlock _after_ the thread's context has been saved and the
 * unlocking will be done in reverse from this hierarchy.
 *
 * 3) Thread slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress, such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Lists
 * Usually referring to a list (aka. queue) that a thread will be blocking
 * on, that belongs to some object and is shareable amongst multiple
 * processors. Parts of the scheduler may have access to them without actually
 * locking the kernel object, such as when a thread is blocked with a timeout
 * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
 * its lists locked when the thread blocks so that all object list access is
 * synchronized. Failure to do so would corrupt the list links.
 *
 * 5) Core lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. They are used when an
 * operation may only be performed by the thread's own core in a normal
 * execution context. The wakeup list is the prime example where a thread
 * may be added by any means and the thread's own core will remove it from
 * the wakeup list and put it on the running list (which is only ever
 * accessible by its own processor).
 */
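/* Illustrative sketch of the nesting order above (an assumption for
 * explanation only, not code from this file): a hypothetical kernel object
 * with its own corelock `obj_cl` and wait queue `obj_q` blocking a thread
 * would nest roughly like this:
 *
 *   int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);   // 1) IRQ
 *   corelock_lock(&obj_cl);                            // 2) Kernel object
 *   unsigned state = GET_THREAD_STATE(thread);         // 3) Thread slot
 *   block_thread_on_l(&obj_q, thread, STATE_BLOCKED);  // 4) Object list
 *   switch_thread(thread);     // blk_ops unlock the above in reverse order
 *   set_irq_level(oldlevel);
 */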
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */

struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
extern int stackbegin[];
extern int stackend[];
/* core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step 3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on
 *    Coldfire), goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 */
static inline void core_sleep(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));
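/* A minimal C-level sketch of that contract for a hypothetical multicore
 * port (illustration only; the real implementations below are CPU-specific
 * and mostly assembly). woken_by_other_core() and cpu_sleep_with_irqs() are
 * hypothetical helpers, not part of this file:
 *
 *   static inline void core_sleep(unsigned int core)
 *   {
 *       if (!woken_by_other_core(core))   // step 1
 *           cpu_sleep_with_irqs(core);    // steps 2-3, atomic w.r.t. wakeup
 *       set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); // step 4
 *   }
 */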
static void check_tmo_threads(void)
        __attribute__((noinline));

static inline void block_thread_on_l(
    struct thread_queue *list, struct thread_entry *thread, unsigned state)
        __attribute__((always_inline));

static inline void block_thread_on_l_no_listlock(
    struct thread_entry **list, struct thread_entry *thread, unsigned state)
        __attribute__((always_inline));

static inline void _block_thread_on_l(
    struct thread_queue *list, struct thread_entry *thread,
    unsigned state IF_SWCL(, const bool single))
        __attribute__((always_inline));

IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
    struct thread_queue *list IF_SWCL(, const bool nolock))
        __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));

IF_SWCL(static inline) void _block_thread(
    struct thread_queue *list IF_SWCL(, const bool nolock))
        __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));

static void add_to_list_tmo(struct thread_entry *thread)
        __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
        __attribute__((noinline));

static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));

static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
        __attribute__((always_inline));

static void thread_stkov(struct thread_entry *thread)
        __attribute__((noinline));

static inline void store_context(void* addr)
        __attribute__((always_inline));

static inline void load_context(const void* addr)
        __attribute__((always_inline));

void switch_thread(struct thread_entry *old)
        __attribute__((noinline));
/****************************************************************************
 * Processor-specific section
 ***************************************************************************/

#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
static void start_thread(void) __attribute__((naked,used));
static void start_thread(void)
{
    asm volatile (
        "ldr    sp, [r0, #32]          \n" /* Load initial sp */
        "ldr    r4, [r0, #40]          \n" /* start in r4 since it's non-volatile */
        "mov    r1, #0                 \n" /* Mark thread as running */
        "str    r1, [r0, #40]          \n"
        "ldr    r0, =invalidate_icache \n" /* Invalidate this core's cache. */
        "mov    lr, pc                 \n" /* This could be the first entry into */
        "bx     r0                     \n" /* plugin or codec code for this core. */
        "mov    lr, pc                 \n" /* Call thread function */
        "bx     r4                     \n"
        "mov    r0, #0                 \n" /* remove_thread(NULL) */
        "ldr    pc, =remove_thread     \n"
        ".ltorg                        \n" /* Dump constant pool */
    ); /* No clobber list - new thread doesn't care */
}
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
       (thread)->context.r[1] = (unsigned int)start_thread,       \
       (thread)->context.start = (void *)function; })
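/* Illustrative use (a sketch, not code from this file): once a slot has been
 * set up for a new thread, thread creation would prime it roughly like
 *
 *   THREAD_STARTUP_INIT(core, thread, my_thread_func);  // my_thread_func is
 *                                                       // a hypothetical entry point
 *
 * so the first load_context() on this slot sees a non-NULL context.start,
 * loads r0 = &context and pc = start_thread from the r4/r5 slots, and
 * start_thread then calls the function stored in context.start.
 */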
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
        "cmp     r0, #0                 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}
#if defined(CPU_PP)
#if NUM_CORES > 1
extern int cpu_idlestackbegin[];
extern int cpu_idlestackend[];
extern int cop_idlestackbegin[];
extern int cop_idlestackend[];
static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */

#endif /* NUM_CORES */
#if CONFIG_CORELOCK == SW_CORELOCK
/* Software core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}

#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl) __attribute__((naked));
void corelock_lock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "and    r2, r1, #1           \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "1:                          \n"
        "ldrb   r3, [r0, r2]         \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n"
        "ldrneb r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmpne  r3, r1, lsr #7       \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl) __attribute__((naked));
int corelock_try_lock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "and    r2, r1, #1           \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r3, [r0, r2]         \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n"
        "ldrneb r3, [r0, #2]         \n" /* || cl->turn == core? */
        "cmpne  r3, r1, lsr #7       \n"
        "moveq  r0, #1               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "mov    r0, #0               \n"
        "bx     lr                   \n" /* acquisition failed */
        : : "i"(&PROCESSOR_ID)
    );
    return 0;
    (void)cl;
}
/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl) __attribute__((naked));
void corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
#else /* C versions for reference */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    for (;;)
    {
        if (cl->myl[othercore] == 0 || cl->turn == core)
            break;
    }
}
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    if (cl->myl[othercore] == 0 || cl->turn == core)
    {
        return 1;
    }

    cl->myl[core] = 0;
    return 0;
}
/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */

#endif /* CONFIG_CORELOCK == SW_CORELOCK */
/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
static inline void core_sleep(void)
{
    PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
    set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
}
#elif defined (CPU_PP502x)
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov r0, #4                        \n" /* r0 = 0x4 << core */
        "mov r0, r0, lsl %[c]              \n"
        "str r0, [%[mbx], #4]              \n" /* signal intent to sleep */
        "ldr r1, [%[mbx], #0]              \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst r1, r0, lsl #2                \n"
        "moveq r1, #0x80000000             \n" /* Then sleep */
        "streq r1, [%[ctl], %[c], lsl #2]  \n"
        "moveq r1, #0                      \n" /* Clear control reg */
        "streq r1, [%[ctl], %[c], lsl #2]  \n"
        "orr r1, r0, r0, lsl #2            \n" /* Signal intent to wake - clear wake flag */
        "str r1, [%[mbx], #8]              \n"
        "1:                                \n" /* Wait for wake procedure to finish */
        "ldr r1, [%[mbx], #0]              \n"
        "tst r1, r0, lsr #2                \n"
        "bne 1b                            \n"
        "mrs r1, cpsr                      \n" /* Enable interrupts */
        "bic r1, r1, #0xc0                 \n"
        "msr cpsr_c, r1                    \n"
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
#else /* C version for reference */
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        PROC_CTL(core) = PROC_SLEEP; nop; /* Snooze */
        PROC_CTL(core) = 0;               /* Clear control reg */
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));

    /* Enable IRQ, FIQ */
    set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
#endif /* ASM/C selection */
}
#elif CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - emulate using bytes */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov r0, #1                         \n" /* Signal intent to sleep */
        "strb r0, [%[sem], #2]              \n"
        "ldrb r0, [%[sem], #1]              \n" /* && stay_awake == 0? */
        "cmp r0, #0                         \n"
        "moveq r0, #0xca                    \n" /* Then sleep */
        "streqb r0, [%[ctl], %[c], lsl #2]  \n"
        "nop                                \n" /* nop's needed because of pipeline */
        "mov r0, #0                         \n" /* Clear stay_awake and sleep intent */
        "strb r0, [%[sem], #1]              \n"
        "strb r0, [%[sem], #2]              \n"
        "1:                                 \n" /* Wait for wake procedure to finish */
        "ldrb r0, [%[sem], #0]              \n"
        "cmp r0, #0                         \n"
        "bne 1b                             \n"
        "mrs r0, cpsr                       \n" /* Enable interrupts */
        "bic r0, r0, #0xc0                  \n"
        "msr cpsr_c, r0                     \n"
        :
        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
          [ctl]"r"(&PROC_CTL(CPU))
        : "r0");
#else /* C version for reference */
    /* Signal intent to sleep */
    core_semaphores[core].intend_sleep = 1;

    /* Something waking or other processor intends to wake us? */
    if (core_semaphores[core].stay_awake == 0)
    {
        PROC_CTL(core) = PROC_SLEEP; /* Snooze */
    }

    /* Signal wake - clear wake flag */
    core_semaphores[core].stay_awake = 0;
    core_semaphores[core].intend_sleep = 0;

    /* Wait for other processor to finish wake procedure */
    while (core_semaphores[core].intend_wake != 0);

    /* Enable IRQ, FIQ */
    set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
#endif /* ASM/C selection */
}
#endif /* CPU type */
/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already destined. FIQ, IRQ should be disabled before calling.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey */
}
#elif defined (CPU_PP502x)
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs r3, cpsr                       \n" /* Disable IRQ */
        "orr r1, r3, #0x80                  \n"
        "msr cpsr_c, r1                     \n"
        "mov r2, #0x11                      \n" /* r2 = (0x11 << othercore) */
        "mov r2, r2, lsl %[oc]              \n" /* Signal intent to wake othercore */
        "str r2, [%[mbx], #4]               \n"
        "1:                                 \n" /* If it intends to sleep, let it first */
        "ldr r1, [%[mbx], #0]               \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor r1, r1, #0xc                   \n"
        "tst r1, r2, lsr #2                 \n"
        "ldr r1, [%[ctl], %[oc], lsl #2]    \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq r1, #0x80000000              \n"
        "beq 1b                             \n" /* Wait for sleep or wake */
        "tst r1, #0x80000000                \n" /* If sleeping, wake it */
        "movne r1, #0                       \n"
        "strne r1, [%[ctl], %[oc], lsl #2]  \n"
        "mov r1, r2, lsr #4                 \n"
        "str r1, [%[mbx], #8]               \n" /* Done with wake procedure */
        "msr cpsr_c, r3                     \n" /* Restore int status */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    set_irq_level(oldlevel);
#endif /* ASM/C selection */
}
#elif CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - emulate using bytes */
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs r3, cpsr                       \n" /* Disable IRQ */
        "orr r1, r3, #0x80                  \n"
        "msr cpsr_c, r1                     \n"
        "mov r1, #1                         \n" /* Signal intent to wake other core */
        "orr r1, r1, r1, lsl #8             \n" /* and set stay_awake */
        "strh r1, [%[sem], #0]              \n"
        "1:                                 \n" /* If it intends to sleep, let it first */
        "ldrb r1, [%[sem], #2]              \n" /* intend_sleep != 0 ? */
        "ldr r1, [%[st]]                    \n" /* && not sleeping ? */
        "tsteq r1, r2, lsr %[oc]            \n"
        "beq 1b                             \n" /* Wait for sleep or wake */
        "tst r1, r2, lsr %[oc]              \n"
        "ldrne r2, =0xcf004054              \n" /* If sleeping, wake it */
        "strneb r1, [r2, %[oc], lsl #2]     \n"
        "mov r1, #0                         \n" /* Done with wake procedure */
        "strb r1, [%[sem], #0]              \n"
        "msr cpsr_c, r3                     \n" /* Restore int status */
        :
        : [sem]"r"(&core_semaphores[othercore]),
          [st]"r"(&PROC_STAT), [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Signal intent to wake other processor - set stay awake */
    core_semaphores[othercore].intend_wake = 1;
    core_semaphores[othercore].stay_awake = 1;

    /* If it intends to sleep, wait until it does or aborts */
    while (core_semaphores[othercore].intend_sleep != 0 &&
           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);

    /* If sleeping, wake it up */
    if (PROC_STAT & PROC_SLEEPING(othercore))
        PROC_CTL(othercore) = PROC_WAKE;

    /* Done with wake procedure */
    core_semaphores[othercore].intend_wake = 0;
    set_irq_level(oldlevel);
#endif /* ASM/C selection */
}
#endif /* CPU type */

#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core in which case the core will continue to use a
 * stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}
/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place after having entered switch_thread and before
 * changing the processor, since switch_thread may not do a normal return
 * because the stack being used for anything the compiler saved will not belong
 * to the thread's destination core and it may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in the
 * nonvolatile registers if it is saved there before the call since the
 * compiler's order of operations cannot be known for certain.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    flush_icache();
    /* Stash thread in r4 slot */
    thread->context.r[0] = (unsigned int)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = (unsigned int)thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
}
/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill-behavior regarding
 * naked functions written in C where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 *---------------------------------------------------------------------------
 */
static void switch_thread_core(unsigned int core, struct thread_entry *thread)
    __attribute__((naked));
static void switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
     * Stack access also isn't permitted until restoring the original stack and
     * context. */
    asm volatile (
        "stmfd  sp!, { r4-r12, lr }    \n" /* Stack all non-volatile context on current core */
        "ldr    r2, =idle_stacks       \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr    r2, [r2, r0, lsl #2]   \n"
        "add    r2, r2, %0*4           \n"
        "stmfd  r2!, { sp }            \n" /* save original stack pointer on idle stack */
        "mov    sp, r2                 \n" /* switch stacks */
        "adr    r2, 1f                 \n" /* r2 = new core restart address */
        "str    r2, [r1, #40]          \n" /* thread->context.start = r2 */
        "mov    r0, r1                 \n" /* switch_thread(thread) */
        "ldr    pc, =switch_thread     \n" /* r0 = thread after call - see load_context */
        "1:                            \n"
        "ldr    sp, [r0, #32]          \n" /* Reload original sp from context structure */
        "mov    r1, #0                 \n" /* Clear start address */
        "str    r1, [r0, #40]          \n"
        "ldr    r0, =invalidate_icache \n" /* Invalidate new core's cache */
        "mov    lr, pc                 \n"
        "bx     r0                     \n"
        "ldmfd  sp!, { r4-r12, pc }    \n" /* Restore non-volatile context to new core and return */
        ".ltorg                        \n" /* Dump constant pool */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == S3C2440

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* FIQ also changes the CLKCON register so FIQ must be disabled
       when changing it here */
    asm volatile (
        "mrs r0, cpsr        \n" /* Prepare IRQ, FIQ enable */
        "bic r0, r0, #0xc0   \n"
        "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
        "ldr r2, [r1, #0xc]  \n" /* Set IDLE bit */
        "orr r2, r2, #4      \n"
        "str r2, [r1, #0xc]  \n"
        "msr cpsr_c, r0      \n" /* Enable IRQ, FIQ */
        "mov r2, #0          \n" /* wait for IDLE */
        "1:                  \n"
        "add r2, r2, #1      \n"
        "cmp r2, #10         \n"
        "bne 1b              \n"
        "orr r2, r0, #0xc0   \n" /* Disable IRQ, FIQ */
        "msr cpsr_c, r2      \n"
        "ldr r2, [r1, #0xc]  \n" /* Reset IDLE bit */
        "bic r2, r2, #4      \n"
        "str r2, [r1, #0xc]  \n"
        "msr cpsr_c, r0      \n" /* Enable IRQ, FIQ */
        : : : "r0", "r1", "r2");
}
#elif defined(CPU_TCC77X)
static inline void core_sleep(void)
{
    #warning TODO: Implement core_sleep
}
#elif CONFIG_CPU == IMX31L
static inline void core_sleep(void)
{
    asm volatile (
        "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
        "mrs r0, cpsr              \n" /* Unmask IRQ/FIQ at core level */
        "bic r0, r0, #0xc0         \n"
        "msr cpsr_c, r0            \n"
        : : : "r0"
    );
}
#else
static inline void core_sleep(void)
{
    #warning core_sleep not implemented, battery life will be decreased
}
#endif /* CONFIG_CPU == */
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __start_thread(void) __attribute__((used));
static void __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
        "start_thread:            \n" /* Start here - no naked attribute */
        "move.l  %a0, %macsr      \n" /* Set initial mac status reg */
        "lea.l   48(%a1), %a1     \n"
        "move.l  (%a1)+, %sp      \n" /* Set initial stack */
        "move.l  (%a1), %a2       \n" /* Fetch thread function pointer */
        "clr.l   (%a1)            \n" /* Mark thread running */
        "jsr     (%a2)            \n" /* Call thread function */
        "clr.l   -(%sp)           \n" /* remove_thread(NULL) */
        "jsr     remove_thread    \n"
    );
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0] = (unsigned int)&(thread)->context, \
       (thread)->context.d[1] = (unsigned int)start_thread,       \
       (thread)->context.start = (void *)(function); })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l  52(%0), %%d0                   \n" /* Get start address */
        "beq.b   1f                             \n" /* NULL -> already running */
        "movem.l (%0), %%a0-%%a2                \n" /* a0=macsr, a1=context, a2=start_thread */
        "jmp     (%%a2)                         \n" /* Start the thread */
        "1:                                     \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0, %%macsr                  \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* Supervisor mode, interrupts enabled upon wakeup */
    asm volatile ("stop #0x2000");
}
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __start_thread(void) __attribute__((used));
static void __start_thread(void)
{
    /* r8 = context */
    asm volatile (
        "_start_thread:        \n" /* Start here - no naked attribute */
        "mov.l  @(4, r8), r0   \n" /* Fetch thread function pointer */
        "mov.l  @(28, r8), r15 \n" /* Set initial sp */
        "mov    #0, r1         \n" /* Start the thread */
        "jsr    @r0            \n"
        "mov.l  r1, @(36, r8)  \n" /* Clear start address */
        "mov.l  1f, r0         \n" /* remove_thread(NULL) */
        "jmp    @r0            \n"
        "mov    #0, r4         \n"
        "1:                    \n"
        ".long  _remove_thread \n"
    );
}

/* Place context pointer in r8 slot, function pointer in r9 slot, and
 * start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
       (thread)->context.r[1] = (unsigned int)(function),         \
       (thread)->context.start = (void*)start_thread; })

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36, %0  \n" /* Start at last reg. By the time routine */
        "sts.l   pr, @-%0 \n" /* is done, %0 will have the original value */
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l   @(36, %0), r0 \n" /* Get start address */
        "tst     r0, r0        \n"
        "bt      .running      \n" /* NULL -> already running */
        "jmp     @r0           \n" /* r8 = context */
        ".running:             \n"
        "mov.l   @%0+, r8      \n" /* Executes in delay slot and outside it */
        "mov.l   @%0+, r9      \n"
        "mov.l   @%0+, r10     \n"
        "mov.l   @%0+, r11     \n"
        "mov.l   @%0+, r12     \n"
        "mov.l   @%0+, r13     \n"
        "mov.l   @%0+, r14     \n"
        "mov.l   @%0+, r15     \n"
        "lds.l   @%0+, pr      \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    asm volatile (
        "and.b  #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
        "mov    #0, r1            \n" /* Enable interrupts */
        "ldc    r1, sr            \n" /* Following instruction cannot be interrupted */
        "sleep                    \n" /* Execute standby */
        : : "z"(&SBYCR-GBR) : "r1");
}

#endif /* CONFIG_CPU == */
/****************************************************************************
 * End Processor-specific section
 ***************************************************************************/
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    const unsigned int core = thread->core;
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}

static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    const unsigned int core = thread->core;
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}

#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
/*---------------------------------------------------------------------------
 * Lock a list pointer and return its value
 *---------------------------------------------------------------------------
 */
#if CONFIG_CORELOCK == SW_CORELOCK
/* Separate locking function versions */

#define GET_THREAD_STATE(thread) \
    ({ corelock_lock(&(thread)->cl); (thread)->state; })
#define TRY_GET_THREAD_STATE(thread) \
    ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
#define UNLOCK_THREAD(thread, state) \
    ({ corelock_unlock(&(thread)->cl); })
#define UNLOCK_THREAD_SET_STATE(thread, _state) \
    ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })

#define LOCK_LIST(tqp) \
    ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
#define UNLOCK_LIST(tqp, mod) \
    ({ corelock_unlock(&(tqp)->cl); })
#define UNLOCK_LIST_SET_PTR(tqp, mod) \
    ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })

/* Select the queue pointer directly */
#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
    ({ add_to_list_l(&(tqp)->queue, (thread)); })
#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
    ({ remove_from_list_l(&(tqp)->queue, (thread)); })

#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* Native swap/exchange versions */

/* Thread locking */
#define GET_THREAD_STATE(thread) \
    ({ unsigned _s; \
       while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
       _s; })
#define TRY_GET_THREAD_STATE(thread) \
    ({ xchg8(&(thread)->state, STATE_BUSY); })
#define UNLOCK_THREAD(thread, _state) \
    ({ (thread)->state = (_state); })
#define UNLOCK_THREAD_SET_STATE(thread, _state) \
    ({ (thread)->state = (_state); })

#define LOCK_LIST(tqp) \
    ({ struct thread_entry *_l; \
       while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
       _l; })
#define UNLOCK_LIST(tqp, mod) \
    ({ (tqp)->queue = (mod); })
#define UNLOCK_LIST_SET_PTR(tqp, mod) \
    ({ (tqp)->queue = (mod); })

/* Select the local queue pointer copy returned from LOCK_LIST */
#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
    ({ add_to_list_l(&(tc), (thread)); })
#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
    ({ remove_from_list_l(&(tc), (thread)); })

#else
/* Single-core/non-locked versions */

#define GET_THREAD_STATE(thread) \
    ({ (thread)->state; })
#define UNLOCK_THREAD(thread, _state)
#define UNLOCK_THREAD_SET_STATE(thread, _state) \
    ({ (thread)->state = (_state); })

#define LOCK_LIST(tqp) \
    ({ (tqp)->queue; })
#define UNLOCK_LIST(tqp, mod)
#define UNLOCK_LIST_SET_PTR(tqp, mod) \
    ({ (tqp)->queue = (mod); })

/* Select the queue pointer directly */
#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
    ({ add_to_list_l(&(tqp)->queue, (thread)); })
#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
    ({ remove_from_list_l(&(tqp)->queue, (thread)); })

#endif /* locking selection */
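/* A minimal usage sketch (assumption for illustration, not code from this
 * file): callers pair the lock and unlock macros around a slot state
 * transition, and whichever variant is selected above compiles to the same
 * pattern:
 *
 *   unsigned state = GET_THREAD_STATE(thread);            // lock slot, read state
 *   if (state != STATE_BLOCKED)
 *       UNLOCK_THREAD(thread, state);                     // release, state unchanged
 *   else
 *       UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);   // release with new state
 */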
#if THREAD_EXTRA_CHECKS
/*---------------------------------------------------------------------------
 * Lock the thread slot to obtain the state and then unlock it. Waits for
 * it not to be busy. Used for debugging.
 *---------------------------------------------------------------------------
 */
static unsigned peek_thread_state(struct thread_entry *thread)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    unsigned state = GET_THREAD_STATE(thread);
    UNLOCK_THREAD(thread, state);
    set_irq_level(oldlevel);
    return state;
}
#endif /* THREAD_EXTRA_CHECKS */
/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.next = thread;
        thread->l.prev = thread;
        *list = thread;
        return;
    }

    thread->l.prev = l->l.prev;
    thread->l.prev->l.next = thread;
    thread->l.next = l->l.next;
    thread->l.next->l.prev = thread;
}
/*---------------------------------------------------------------------------
 * Locks a list, adds the thread entry and unlocks the list on multicore.
 * Defined as add_to_list_l on single-core.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static void add_to_list_l_locked(struct thread_queue *tq,
                                 struct thread_entry *thread)
{
    struct thread_entry *t = LOCK_LIST(tq);
    ADD_TO_LIST_L_SELECT(t, tq, thread);
    UNLOCK_LIST(tq, t);
    (void)t;
}
#else
#define add_to_list_l_locked(tq, thread) \
    add_to_list_l(&(tq)->queue, (thread))
#endif
/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    prev->l.next = next;
    next->l.prev = prev;
}
/*---------------------------------------------------------------------------
 * Locks a list, removes the thread entry and unlocks the list on multicore.
 * Defined as remove_from_list_l on single-core.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static void remove_from_list_l_locked(struct thread_queue *tq,
                                      struct thread_entry *thread)
{
    struct thread_entry *t = LOCK_LIST(tq);
    REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
    UNLOCK_LIST(tq, t);
    (void)t;
}
#else
#define remove_from_list_l_locked(tq, thread) \
    remove_from_list_l(&(tq)->queue, (thread))
#endif
/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;

    thread->tmo.prev = thread;
    thread->tmo.next = t;

    if (t != NULL)
    {
        /* Fix second item's prev pointer to point to this thread */
        t->tmo.prev = thread;
    }

    cores[IF_COP_CORE(thread->core)].timeout = thread;
}
/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is no longer active.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *next = thread->tmo.next;
    struct thread_entry *prev;

    if (thread == cores[IF_COP_CORE(thread->core)].timeout)
    {
        /* Next item becomes list head */
        cores[IF_COP_CORE(thread->core)].timeout = next;

        if (next != NULL)
        {
            /* Fix new list head's prev to point to itself. */
            next->tmo.prev = next;
        }

        thread->tmo.prev = NULL;
        return;
    }

    prev = thread->tmo.prev;

    if (next != NULL)
    {
        next->tmo.prev = prev;
    }

    prev->tmo.next = next;
    thread->tmo.prev = NULL;
}
/*---------------------------------------------------------------------------
 * Schedules a thread wakeup on the specified core. Threads will be made
 * ready to run when the next task switch occurs. Note that this does not
 * introduce an on-core delay since the soonest the next thread may run is
 * no sooner than that. Other cores and on-core interrupts may only ever
 * add to the waking list.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    const unsigned int core = IF_COP_CORE(thread->core);
    add_to_list_l_locked(&cores[core].waking, thread);

    if (core != CURRENT_CORE)
    {
        core_wake(core);
    }

    set_irq_level(oldlevel);
}
/*---------------------------------------------------------------------------
 * If the waking list was populated, move all threads on it onto the running
 * list so they may be run ASAP.
 *---------------------------------------------------------------------------
 */
static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
{
    struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
    struct thread_entry *r = cores[IF_COP_CORE(core)].running;

    /* Transfer all threads on waking list to running list in one
       go */
    if (r != NULL)
    {
        /* Place waking threads at the end of the running list. */
        struct thread_entry *tmp;
        w->l.prev->l.next = r;
        r->l.prev->l.next = w;
        tmp = r->l.prev;
        r->l.prev = w->l.prev;
        w->l.prev = tmp;
    }
    else
    {
        /* Just transfer the list as-is */
        cores[IF_COP_CORE(core)].running = w;
    }

    /* Just leave any timeout threads on the timeout list. If a timeout check
     * is due, they will be removed there. If they do a timeout again before
     * being removed, they will just stay on the list with a new expiration
     * tick. */

    /* Waking list is clear - NULL and unlock it */
    UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
}
/*---------------------------------------------------------------------------
 * Check the core's timeout list when at least one thread is due to wake.
 * Filtering for the condition is done before making the call. Resets the
 * tick when the next check will occur.
 *---------------------------------------------------------------------------
 */
static void check_tmo_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *next = cores[core].timeout;

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */
    if (next != NULL)
    {
        /* Check sleeping threads. */
        do
        {
            /* Must make sure no one else is examining the state, wait until
               slot is no longer busy */
            struct thread_entry *curr = next;
            next = curr->tmo.next;

            unsigned state = GET_THREAD_STATE(curr);

            if (state < TIMEOUT_STATE_FIRST)
            {
                /* Cleanup threads no longer on a timeout but still on the
                   list. */
                remove_from_list_tmo(curr);
                UNLOCK_THREAD(curr, state); /* Unlock thread slot */
            }
            else if (TIME_BEFORE(tick, curr->tmo_tick))
            {
                /* Timeout still pending - this will be the usual case */
                if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
                {
                    /* Earliest timeout found so far - move the next check up
                       to its time */
                    next_tmo_check = curr->tmo_tick;
                }
                UNLOCK_THREAD(curr, state); /* Unlock thread slot */
            }
            else
            {
                /* Sleep timeout has been reached so bring the thread back to
                   life again. */
                if (state == STATE_BLOCKED_W_TMO)
                {
                    remove_from_list_l_locked(curr->bqp, curr);
                }

                remove_from_list_tmo(curr);
                add_to_list_l(&cores[core].running, curr);
                UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
            }

            /* Break the loop once we have walked through the list of all
             * sleeping processes or have removed them all. */
        }
        while (next != NULL);
    }

    cores[core].next_tmo_check = next_tmo_check;
}
/*---------------------------------------------------------------------------
 * Performs operations that must be done before blocking a thread but after
 * the state is saved - follows reverse of locking order. blk_ops.flags is
 * assumed to be nonzero.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
{
    struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
    const unsigned flags = ops->flags;

    if (flags == 0)
        return;

    if (flags & TBOP_SWITCH_CORE)
    {
        core_switch_blk_op(core, thread);
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    if (flags & TBOP_UNLOCK_LIST)
    {
        UNLOCK_LIST(ops->list_p, NULL);
    }

    if (flags & TBOP_UNLOCK_CORELOCK)
    {
        corelock_unlock(ops->cl_p);
    }

    if (flags & TBOP_UNLOCK_THREAD)
    {
        UNLOCK_THREAD(ops->thread, 0);
    }
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    /* Write updated variable value into memory location */
    switch (flags & TBOP_VAR_TYPE_MASK)
    {
    case TBOP_UNLOCK_LIST:
        UNLOCK_LIST(ops->list_p, ops->list_v);
        break;
    case TBOP_SET_VARi:
        *ops->var_ip = ops->var_iv;
        break;
    case TBOP_SET_VARu8:
        *ops->var_u8p = ops->var_u8v;
        break;
    }
#endif /* CONFIG_CORELOCK == */

    /* Unlock thread's slot */
    if (flags & TBOP_UNLOCK_CURRENT)
    {
        UNLOCK_THREAD(thread, ops->state);
    }

    ops->flags = 0;
}
#endif /* NUM_CORES > 1 */
/*---------------------------------------------------------------------------
 * Runs any operations that may cause threads to be ready to run and then
 * sleeps the processor core until the next interrupt if none are.
 *---------------------------------------------------------------------------
 */
static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
{
    for (;;)
    {
        set_irq_level(HIGHEST_IRQ_LEVEL);
        /* We want to do these ASAP as it may change the decision to sleep
         * the core or a core has woken because an interrupt occurred
         * and posted a message to a queue. */
        if (cores[IF_COP_CORE(core)].waking.queue != NULL)
        {
            core_perform_wakeup(IF_COP(core));
        }

        /* If there are threads on a timeout and the earliest wakeup is due,
         * check the list and wake any threads that need to start running
         * again. */
        if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
        {
            check_tmo_threads();
        }

        /* If there is a ready to run task, return its ID and keep core
         * awake. */
        if (cores[IF_COP_CORE(core)].running == NULL)
        {
            /* Enter sleep mode to reduce power usage - woken up on interrupt
             * or wakeup request from another core - expected to enable all
             * interrupts. */
            core_sleep(IF_COP(core));
            continue;
        }

        return cores[IF_COP_CORE(core)].running;
    }
}
#ifdef RB_PROFILE
void profile_thread(void)
{
    profstart(cores[CURRENT_CORE].running - threads);
}
#endif
/*---------------------------------------------------------------------------
 * Prepares a thread to block on an object's list and/or for a specified
 * duration - expects object and slot to be appropriately locked if needed.
 *---------------------------------------------------------------------------
 */
static inline void _block_thread_on_l(struct thread_queue *list,
                                      struct thread_entry *thread,
                                      unsigned state
                                      IF_SWCL(, const bool nolock))
{
    /* If inlined, unreachable branches will be pruned with no size penalty
       because constant params are used for state and nolock. */
    const unsigned int core = IF_COP_CORE(thread->core);

    /* Remove the thread from the list of running threads. */
    remove_from_list_l(&cores[core].running, thread);

    /* Add a timeout to the block if not infinite */
    switch (state)
    {
    case STATE_BLOCKED:
        /* Put the thread into a new list of inactive threads. */
#if CONFIG_CORELOCK == SW_CORELOCK
        if (nolock)
        {
            thread->bqp = NULL; /* Indicate nolock list */
            thread->bqnlp = (struct thread_entry **)list;
            add_to_list_l((struct thread_entry **)list, thread);
        }
        else
#endif
        {
            thread->bqp = list;
            add_to_list_l_locked(list, thread);
        }
        break;
    case STATE_BLOCKED_W_TMO:
        /* Put the thread into a new list of inactive threads. */
#if CONFIG_CORELOCK == SW_CORELOCK
        if (nolock)
        {
            thread->bqp = NULL; /* Indicate nolock list */
            thread->bqnlp = (struct thread_entry **)list;
            add_to_list_l((struct thread_entry **)list, thread);
        }
        else
#endif
        {
            thread->bqp = list;
            add_to_list_l_locked(list, thread);
        }
        /* Fall-through */
    case STATE_SLEEPING:
        /* If this thread times out sooner than any other thread, update
           next_tmo_check to its timeout */
        if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
        {
            cores[core].next_tmo_check = thread->tmo_tick;
        }

        if (thread->tmo.prev == NULL)
        {
            add_to_list_tmo(thread);
        }
        /* else thread was never removed from list - just keep it there */
        break;
    }

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Reset priorities */
    if (thread->priority == cores[core].highest_priority)
        cores[core].highest_priority = LOWEST_PRIORITY;
#endif

#if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
    /* Safe to set state now */
    thread->state = state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    cores[core].blk_ops.state = state;
#endif

#if NUM_CORES > 1
    /* Delay slot unlock until task switch */
    cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
#endif
}

static inline void block_thread_on_l(
    struct thread_queue *list, struct thread_entry *thread, unsigned state)
{
    _block_thread_on_l(list, thread, state IF_SWCL(, false));
}

static inline void block_thread_on_l_no_listlock(
    struct thread_entry **list, struct thread_entry *thread, unsigned state)
{
    _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
}
1582 * Switch thread in round robin fashion for any given priority. Any thread
1583 * that removed itself from the running list first must specify itself in
1586 * INTERNAL: Intended for use by kernel and not for programs.
1587 *---------------------------------------------------------------------------
1589 void switch_thread(struct thread_entry
*old
)
1591 const unsigned int core
= CURRENT_CORE
;
1592 struct thread_entry
*thread
= cores
[core
].running
;
1596 /* Move to next thread */
1598 cores
[core
].running
= old
->l
.next
;
1600 /* else running list is already at next thread */
1603 profile_thread_stopped(old
- threads
);
1606 /* Begin task switching by saving our current context so that we can
1607 * restore the state of the current thread later to the point prior
1609 store_context(&old
->context
);
1611 /* Check if the current thread stack is overflown */
1612 if(((unsigned int *)old
->stack
)[0] != DEADBEEF
)
1616 /* Run any blocking operations requested before switching/sleeping */
1617 run_blocking_ops(core
, old
);
1620 /* Go through the list of sleeping task to check if we need to wake up
1621 * any of them due to timeout. Also puts core into sleep state until
1622 * there is at least one running process again. */
1623 thread
= sleep_core(IF_COP(core
));
1625 #ifdef HAVE_PRIORITY_SCHEDULING
1626 /* Select the new task based on priorities and the last time a process
1630 int priority
= thread
->priority
;
1632 if (priority
< cores
[core
].highest_priority
)
1633 cores
[core
].highest_priority
= priority
;
1635 if (priority
== cores
[core
].highest_priority
||
1636 thread
->priority_x
< cores
[core
].highest_priority
||
1637 (current_tick
- thread
->last_run
> priority
* 8))
1639 cores
[core
].running
= thread
;
1643 thread
= thread
->l
.next
;
1646 /* Reset the value of thread's last running time to the current time. */
1647 thread
->last_run
= current_tick
;
1648 #endif /* HAVE_PRIORITY_SCHEDULING */
1650 /* And finally give control to the next thread. */
1651 load_context(&thread
->context
);
1654 profile_thread_started(thread
- threads
);
/*---------------------------------------------------------------------------
 * Change the boost state of a thread, boosting or unboosting the CPU
 * as required. Requires thread slot to be locked first.
 *---------------------------------------------------------------------------
 */
static inline void boost_thread(struct thread_entry *thread, bool boost)
{
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    if ((thread->boosted != 0) != boost)
    {
        thread->boosted = boost;
        cpu_boost(boost);
    }
#endif
    (void)thread; (void)boost;
}
/*---------------------------------------------------------------------------
 * Sleeps a thread for a specified number of ticks and unboosts the thread
 * if it is boosted. If ticks is zero, it does not delay but instead switches
 * tasks.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void sleep_thread(int ticks)
{
    /* Get the entry for the current running thread. */
    struct thread_entry *current = cores[CURRENT_CORE].running;

#if NUM_CORES > 1
    /* Lock thread slot */
    GET_THREAD_STATE(current);
#endif

    /* Set our timeout, change lists, and finally switch threads.
     * Unlock during switch on multicore. */
    current->tmo_tick = current_tick + ticks + 1;
    block_thread_on_l(NULL, current, STATE_SLEEPING);
    switch_thread(current);

    /* Our status should be STATE_RUNNING */
    THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
                  "S:R->!*R", current);
}
/*---------------------------------------------------------------------------
 * Indefinitely block a thread on a blocking queue for explicit wakeup.
 * Caller with interrupt-accessible lists should disable interrupts first
 * and request a BOP_IRQ_LEVEL blocking operation to reset it.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
IF_SWCL(static inline) void _block_thread(struct thread_queue *list
                                          IF_SWCL(, const bool nolock))
{
    /* Get the entry for the current running thread. */
    struct thread_entry *current = cores[CURRENT_CORE].running;

    /* Set the state to blocked and ask the scheduler to switch tasks,
     * this takes us off of the run queue until we are explicitly woken */

#if NUM_CORES > 1
    /* Lock thread slot */
    GET_THREAD_STATE(current);
#endif

#if CONFIG_CORELOCK == SW_CORELOCK
    /* One branch optimized away during inlining */
    if (nolock)
    {
        block_thread_on_l_no_listlock((struct thread_entry **)list,
                                      current, STATE_BLOCKED);
    }
    else
#endif
    {
        block_thread_on_l(list, current, STATE_BLOCKED);
    }

    switch_thread(current);

    /* Our status should be STATE_RUNNING */
    THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
                  "B:R->!*R", current);
}

#if CONFIG_CORELOCK == SW_CORELOCK
/* Inline lock/nolock version of _block_thread into these functions */
void block_thread(struct thread_queue *tq)
{
    _block_thread(tq, false);
}

void block_thread_no_listlock(struct thread_entry **list)
{
    _block_thread((struct thread_queue *)list, true);
}
#endif /* CONFIG_CORELOCK */
/*---------------------------------------------------------------------------
 * Block a thread on a blocking queue for a specified time interval or until
 * explicitly woken - whichever happens first.
 * Caller with interrupt-accessible lists should disable interrupts first
 * and request that interrupt level be restored after switching out the
 * current thread.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread_w_tmo(struct thread_queue *list, int timeout)
{
    /* Get the entry for the current running thread. */
    struct thread_entry *current = cores[CURRENT_CORE].running;

#if NUM_CORES > 1
    /* Lock thread slot */
    GET_THREAD_STATE(current);
#endif

    /* Set the state to blocked with the specified timeout */
    current->tmo_tick = current_tick + timeout;
    /* Set the list for explicit wakeup */
    block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);

    /* Now force a task switch and block until we have been woken up
     * by another thread or timeout is reached - whichever happens first */
    switch_thread(current);

    /* Our status should be STATE_RUNNING */
    THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
                  "T:R->!*R", current);
}
/*---------------------------------------------------------------------------
 * Explicitly wakeup a thread on a blocking queue. Has no effect on threads
 * that called sleep().
 * Caller with interrupt-accessible lists should disable interrupts first.
 * This code should be considered a critical section by the caller.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
    struct thread_queue *list IF_SWCL(, const bool nolock))
{
    struct thread_entry *t;
    struct thread_entry *thread;
    unsigned state;

    /* Wake up the last thread first. */
#if CONFIG_CORELOCK == SW_CORELOCK
    /* One branch optimized away during inlining */
    if (nolock)
    {
        t = list->queue;
    }
    else
#endif
    {
        t = LOCK_LIST(list);
    }

    /* Check if there is a blocked thread at all. */
    if (t == NULL)
    {
#if CONFIG_CORELOCK == SW_CORELOCK
        if (!nolock)
#endif
        {
            UNLOCK_LIST(list, NULL);
        }
        return NULL;
    }

    thread = t;

#if NUM_CORES > 1
#if CONFIG_CORELOCK == SW_CORELOCK
    if (nolock)
    {
        /* Lock thread only, not list */
        state = GET_THREAD_STATE(thread);
    }
    else
#endif
    {
        /* This locks in reverse order from other routines so a retry in the
           correct order may be needed */
        state = TRY_GET_THREAD_STATE(thread);
        if (state == STATE_BUSY)
        {
            /* Unlock list and retry slot, then list */
            UNLOCK_LIST(list, t);
            state = GET_THREAD_STATE(thread);
            t = LOCK_LIST(list);
            /* Be sure thread still exists here - it couldn't have re-added
               itself if it was woken elsewhere because this function is
               serialized within the object that owns the list. */
            if (t != thread)
            {
                /* Thread disappeared :( */
                UNLOCK_LIST(list, t);
                UNLOCK_THREAD(thread, state);
                return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
            }
        }
    }
#else /* NUM_CORES == 1 */
    state = GET_THREAD_STATE(thread);
#endif /* NUM_CORES */

    /* Determine thread's current state. */
    switch (state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from object's blocked list - select t or list depending
           on locking type at compile time */
        REMOVE_FROM_LIST_L_SELECT(t, list, thread);
#if CONFIG_CORELOCK == SW_CORELOCK
        /* Statement optimized away during inlining if nolock != false */
        if (!nolock)
#endif
        {
            UNLOCK_LIST(list, t); /* Unlock list - removal complete */
        }

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Give the task a kick to avoid a stall after wakeup.
           Not really proper treatment - TODO later. */
        thread->last_run = current_tick - 8*LOWEST_PRIORITY;
#endif
        core_schedule_wakeup(thread);
        UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
        return thread;
    default:
        /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
        THREAD_PANICF("wakeup_thread->block invalid", thread);
    case STATE_RUNNING:
    case STATE_KILLED:
#endif
#if CONFIG_CORELOCK == SW_CORELOCK
        /* Statement optimized away during inlining if nolock != false */
        if (!nolock)
#endif
        {
            UNLOCK_LIST(list, t); /* Unlock the object's list */
        }
        UNLOCK_THREAD(thread, state); /* Unlock thread slot */
        return NULL; /* Thread was not blocked */
    }
}
#if CONFIG_CORELOCK == SW_CORELOCK
/* Inline lock/nolock version of _wakeup_thread into these functions */
struct thread_entry * wakeup_thread(struct thread_queue *tq)
{
    return _wakeup_thread(tq, false);
}

struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
{
    return _wakeup_thread((struct thread_queue *)list, true);
}
#endif /* CONFIG_CORELOCK */
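
/* Illustrative sketch, kept out of the build with #if 0: a minimal blocking
 * flag built directly on the no-listlock block/wakeup pair.  The names below
 * are made up; the real kernel objects in kernel.c add object locking and
 * interrupt handling on top of this pattern. */
#if 0
static struct thread_entry *flag_waiter = NULL;

static void flag_wait_example(void)
{
    /* Sleep indefinitely until flag_set_example() wakes us */
    block_thread_no_listlock(&flag_waiter);
}

static void flag_set_example(void)
{
    /* Wake the last blocked waiter, if any (returns NULL when none) */
    wakeup_thread_no_listlock(&flag_waiter);
}
#endif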
/*---------------------------------------------------------------------------
 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
 * will be locked on multicore.
 *---------------------------------------------------------------------------
 */
static int find_empty_thread_slot(void)
{
#if NUM_CORES > 1
    /* Any slot could be on an IRQ-accessible list */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
#else
    /* Thread slots are not locked on single core */
#endif
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        /* Obtain current slot state - lock it on multicore */
        unsigned state = GET_THREAD_STATE(&threads[n]);

        if (state == STATE_KILLED
#if NUM_CORES > 1
            && threads[n].name != THREAD_DESTRUCT
#endif
            )
        {
            /* Slot is empty - leave it locked and caller will unlock */
            break;
        }

        /* Finished examining slot - no longer busy - unlock on multicore */
        UNLOCK_THREAD(&threads[n], state);
    }

#if NUM_CORES > 1
    set_irq_level(oldlevel); /* Reenable interrupts - this slot is
                                not accessible to them yet */
#endif

    return n;
}
/*---------------------------------------------------------------------------
 * Place the current core in idle mode - woken up on interrupt or wake
 * request from another core.
 *---------------------------------------------------------------------------
 */
void core_idle(void)
{
#if NUM_CORES > 1
    const unsigned int core = CURRENT_CORE;
#endif
    set_irq_level(HIGHEST_IRQ_LEVEL);
    core_sleep(IF_COP(core));
}
/*---------------------------------------------------------------------------
 * Create a thread.
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created.
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    create_thread(void (*function)(void), void* stack, int stack_size,
                  unsigned flags, const char *name
                  IF_PRIO(, int priority)
                  IF_COP(, unsigned int core))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int slot;
    struct thread_entry *thread;
    unsigned state;

    slot = find_empty_thread_slot();
    if (slot >= MAXTHREADS)
    {
        return NULL;
    }

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &threads[slot];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->bqp = NULL;
#if CONFIG_CORELOCK == SW_CORELOCK
    thread->bqnlp = NULL;
#endif
    thread->queue = NULL;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->boosted = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = LOWEST_PRIORITY;
    thread->priority = priority;
    thread->last_run = current_tick - priority * 8;
    cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        flush_icache();
    }
#endif

    /* Thread is not on any timeout list but be a bit paranoid */
    thread->tmo.prev = NULL;

    state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;

    /* Align stack to an even 32 bit boundary */
    thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    if (state == STATE_RUNNING)
    {
#if NUM_CORES > 1
        if (core != CURRENT_CORE)
        {
            /* Next task switch on other core moves thread to running list */
            core_schedule_wakeup(thread);
        }
        else
#endif
        {
            /* Place on running list immediately */
            add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
        }
    }

    /* remove lock and set state */
    UNLOCK_THREAD_SET_STATE(thread, state);

    return thread;
}
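
/* Illustrative sketch, kept out of the build with #if 0: starting a simple
 * worker thread.  The stack size, priority and core below are only example
 * values; DEFAULT_STACK_SIZE, PRIORITY_BACKGROUND, CPU, sleep() and HZ are
 * assumed to be the usual firmware definitions. */
#if 0
static long worker_stack[DEFAULT_STACK_SIZE/sizeof(long)];

static void worker(void)
{
    for (;;)
        sleep(HZ);   /* Do periodic background work here */
}

static void start_worker_example(void)
{
    struct thread_entry *t =
        create_thread(worker, worker_stack, sizeof(worker_stack),
                      0, "worker"
                      IF_PRIO(, PRIORITY_BACKGROUND)
                      IF_COP(, CPU));
    if (t == NULL)
    {
        /* No free slot - handle the failure */
    }
}
#endif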
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    /* No IRQ disable necessary since the current thread cannot be blocked
       on an IRQ-accessible list */
    struct thread_entry *current = cores[CURRENT_CORE].running;
    unsigned state;

    state = GET_THREAD_STATE(current);
    boost_thread(current, true);
    UNLOCK_THREAD(current, state);
}

void cancel_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    unsigned state;

    state = GET_THREAD_STATE(current);
    boost_thread(current, false);
    UNLOCK_THREAD(current, state);
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */
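
/* Illustrative sketch, kept out of the build with #if 0: boosting the CPU
 * around a burst of work.  "do_heavy_work" is a made-up placeholder for the
 * caller's own code. */
#if 0
static void do_heavy_work(void)
{
    /* Placeholder for the actual work */
}

static void boosted_burst_example(void)
{
    trigger_cpu_boost();     /* Ask for full clock speed */
    do_heavy_work();
    cancel_cpu_boost();      /* Drop this thread's boost again */
}
#endif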
/*---------------------------------------------------------------------------
 * Remove a thread from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *
 * Use with care on threads that are not under careful control as this may
 * leave various objects in an undefined state. When trying to kill a thread
 * on another processor, be sure you know what it's doing and won't be
 * switching around itself.
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
#if NUM_CORES > 1
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    unsigned state;
    int oldlevel;

    if (thread == NULL)
        thread = cores[core].running;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    state = GET_THREAD_STATE(thread);

    if (state == STATE_KILLED)
    {
        goto thread_killed;
    }

#if NUM_CORES > 1
    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
           condition if the thread runs away to another processor. */
        unsigned int new_core = thread->core;
        const char *old_name = thread->name;

        thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
        UNLOCK_THREAD(thread, state);
        set_irq_level(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        state = GET_THREAD_STATE(thread);

        core = new_core;

        if (state == STATE_KILLED)
        {
            /* Thread suicided before we could kill it */
            goto thread_killed;
        }

        /* Reopen slot - it's locked again anyway */
        thread->name = old_name;

        if (thread->core != core)
        {
            /* We won't play thread tag - just forget it */
            UNLOCK_THREAD(thread, state);
            set_irq_level(oldlevel);
            goto thread_kill_abort;
        }

        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

#ifdef HAVE_PRIORITY_SCHEDULING
    cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
#endif
    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
           run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    boost_thread(thread, false);
#endif

    if (thread == cores[core].running)
    {
        /* Suicide - thread has unconditional rights to do this */
        /* Maintain locks until switch-out */
        block_thread_on_l(NULL, thread, STATE_KILLED);

#if NUM_CORES > 1
        /* Switch to the idle stack if not on the main core (where "main"
           runs) */
        if (core != CPU)
        {
            switch_to_idle_stack(core);
        }
#endif
        /* Signal this thread */
        thread_queue_wake_no_listlock(&thread->queue);
        /* Switch tasks and never return */
        switch_thread(thread);
        /* This should never and must never be reached - if it is, the
         * state is corrupted */
        THREAD_PANICF("remove_thread->K:*R", thread);
    }

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Another core is doing this operation already */
        UNLOCK_THREAD(thread, state);
        set_irq_level(oldlevel);
        return;
    }
#endif
    if (cores[core].waking.queue != NULL)
    {
        /* Get any threads off the waking list and onto the running
         * list first - waking and running cannot be distinguished by
         * state */
        core_perform_wakeup(IF_COP(core));
    }

    switch (state)
    {
    case STATE_RUNNING:
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        break;
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#if CONFIG_CORELOCK == SW_CORELOCK
        /* One or the other will be valid */
        if (thread->bqp == NULL)
        {
            remove_from_list_l(thread->bqnlp, thread);
        }
        else
#endif /* CONFIG_CORELOCK */
        {
            remove_from_list_l_locked(thread->bqp, thread);
        }
        break;
    /* Otherwise thread is killed or is frozen and hasn't run yet */
    }

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake_no_listlock(&thread->queue);

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock state and reenable interrupts */
    UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
    set_irq_level(oldlevel);

#if NUM_CORES > 1
thread_kill_abort: /* Something stopped us from killing the thread */
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           the original core */
        switch_core(old_core);
    }
#endif /* NUM_CORES > 1 */
}
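
/* Illustrative sketch, kept out of the build with #if 0: a thread may end
 * itself, or another thread may kill it by handle.  "worker_thread" is a
 * made-up handle assumed to have been saved from create_thread(). */
#if 0
static void worker_exit_example(void)
{
    remove_thread(NULL);     /* Suicide - never returns */
}

static void stop_worker_example(struct thread_entry *worker_thread)
{
    remove_thread(worker_thread);   /* Kill it from outside - use with care */
}
#endif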
/*---------------------------------------------------------------------------
 * Block the current thread until another thread terminates. A thread may
 * wait on itself to terminate which prevents it from running again and it
 * will need to be killed externally.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void thread_wait(struct thread_entry *thread)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;
    unsigned thread_state;
#if NUM_CORES > 1
    int oldlevel;
    unsigned current_state;
#endif

    if (thread == NULL)
        thread = current;

#if NUM_CORES > 1
    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
#endif

    thread_state = GET_THREAD_STATE(thread);

#if NUM_CORES > 1
    /* We can't lock the same slot twice. The waitee will also lock itself
       first then the thread slots that will be locked and woken in turn.
       The same order must be observed here as well. */
    if (thread == current)
    {
        current_state = thread_state;
    }
    else
    {
        current_state = GET_THREAD_STATE(current);
    }
#endif

    if (thread_state != STATE_KILLED)
    {
        /* Unlock the waitee state at task switch - not done for self-wait
           because that would double-unlock the state and potentially
           corrupt another's busy assert on the slot */
        if (thread != current)
        {
#if CONFIG_CORELOCK == SW_CORELOCK
            cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
            cores[core].blk_ops.thread = thread;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
            cores[core].blk_ops.flags |= TBOP_SET_VARu8;
            cores[core].blk_ops.var_u8p = &thread->state;
            cores[core].blk_ops.var_u8v = thread_state;
#endif
        }
        block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
        switch_thread(current);
        return;
    }

    /* Unlock both slots - obviously the current thread can't have
       STATE_KILLED so the above if clause will always catch a thread
       waiting on itself */
#if NUM_CORES > 1
    UNLOCK_THREAD(current, current_state);
    UNLOCK_THREAD(thread, thread_state);
    set_irq_level(oldlevel);
#endif
}
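
/* Illustrative sketch, kept out of the build with #if 0: joining a worker
 * that terminates itself with remove_thread(NULL).  "worker_thread" is a
 * made-up handle from create_thread(). */
#if 0
static void join_worker_example(struct thread_entry *worker_thread)
{
    /* Blocks on the worker's queue until it reaches STATE_KILLED */
    thread_wait(worker_thread);
}
#endif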
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative priority for the core it runs on.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(struct thread_entry *thread, int priority)
{
    unsigned old_priority = (unsigned)-1;

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned state = GET_THREAD_STATE(thread);

    /* Make sure it's not killed */
    if (state != STATE_KILLED)
    {
        old_priority = thread->priority;
        thread->priority = priority;
        cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
    }

    UNLOCK_THREAD(thread, state);
    set_irq_level(oldlevel);

    return old_priority;
}
/*---------------------------------------------------------------------------
 * Returns the current priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(struct thread_entry *thread)
{
    /* Simple, quick probe. */
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return (unsigned)thread->priority;
}
/*---------------------------------------------------------------------------
 * Yield that guarantees thread execution once per round regardless of
 * thread's scheduler priority - basically a transient realtime boost
 * without altering the scheduler's thread precedence.
 *
 * HACK ALERT! Search for "priority inheritance" for proper treatment.
 *---------------------------------------------------------------------------
 */
void priority_yield(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread = cores[core].running;
    thread->priority_x = HIGHEST_PRIORITY;
    switch_thread(NULL);
    thread->priority_x = LOWEST_PRIORITY;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
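
/* Illustrative sketch, kept out of the build with #if 0 and only meaningful
 * with HAVE_PRIORITY_SCHEDULING: temporarily raising a thread's priority
 * around a latency-sensitive stretch.  "audio_thread" is a made-up handle
 * and PRIORITY_REALTIME is assumed to be one of the usual priority levels. */
#if 0
static void raise_priority_example(struct thread_entry *audio_thread)
{
    int old = thread_set_priority(audio_thread, PRIORITY_REALTIME);
    /* ... latency-sensitive work ... */
    thread_set_priority(audio_thread, old);
}
#endif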
/* Resumes a frozen thread - similar logic to wakeup_thread except that
   the thread is on no scheduler list at all. It exists simply by virtue of
   the slot having a state of STATE_FROZEN. */
void thread_thaw(struct thread_entry *thread)
{
    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned state = GET_THREAD_STATE(thread);

    if (state == STATE_FROZEN)
    {
        const unsigned int core = CURRENT_CORE;
#if NUM_CORES > 1
        if (thread->core != core)
        {
            core_schedule_wakeup(thread);
        }
        else
#endif
        {
            add_to_list_l(&cores[core].running, thread);
        }

        UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);

        set_irq_level(oldlevel);
        return;
    }

    UNLOCK_THREAD(thread, state);
    set_irq_level(oldlevel);
}
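
/* Illustrative sketch, kept out of the build with #if 0: creating a thread
 * frozen, finishing its setup, then letting it run.  All names, the stack
 * size and PRIORITY_PLAYBACK/CPU/HZ are example assumptions only. */
#if 0
static long codec_stack_ex[0x1000/sizeof(long)];

static void codec_main_ex(void)
{
    for (;;)
        sleep(HZ);
}

static void start_codec_example(void)
{
    struct thread_entry *t =
        create_thread(codec_main_ex, codec_stack_ex, sizeof(codec_stack_ex),
                      CREATE_THREAD_FROZEN, "codec"
                      IF_PRIO(, PRIORITY_PLAYBACK)
                      IF_COP(, CPU));

    /* ... set up whatever codec_main_ex expects to find ... */

    if (t != NULL)
        thread_thaw(t);   /* Only now is the thread allowed to run */
}
#endif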
/*---------------------------------------------------------------------------
 * Return the ID of the currently executing thread.
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;
    struct thread_entry *w;
    int oldlevel;

    /* Interrupts can access the lists that will be used - disable them */
    unsigned state = GET_THREAD_STATE(current);

    if (core == new_core)
    {
        /* No change - just unlock everything and return same core */
        UNLOCK_THREAD(current, state);
        return core;
    }

    /* Get us off the running list for the current core */
    remove_from_list_l(&cores[core].running, current);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned-up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;

    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the wakeup list unlock to keep the other core stuck
     * until this thread is ready. */
    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    w = LOCK_LIST(&cores[new_core].waking);
    ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
                                TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL;
    cores[core].blk_ops.list_p = &cores[new_core].waking;
#if CONFIG_CORELOCK == CORELOCK_SWAP
    cores[core].blk_ops.state = STATE_RUNNING;
    cores[core].blk_ops.list_v = w;
#endif
    cores[core].blk_ops.irq_level = oldlevel;

#ifdef HAVE_PRIORITY_SCHEDULING
    current->priority_x = HIGHEST_PRIORITY;
    cores[core].highest_priority = LOWEST_PRIORITY;
#endif

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

#ifdef HAVE_PRIORITY_SCHEDULING
    current->priority_x = LOWEST_PRIORITY;
    cores[current->core].highest_priority = LOWEST_PRIORITY;
#endif

    /* Finally return the old core to caller */
    return current->retval;
}
#endif /* NUM_CORES > 1 */
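
/* Illustrative sketch, kept out of the build with #if 0: hopping to the
 * coprocessor for a stretch of work and returning afterwards.  COP is
 * assumed to be the usual coprocessor core number; "do_cop_work" is a
 * made-up placeholder. */
#if 0
static void do_cop_work(void)
{
    /* Work that should run on the coprocessor */
}

static void run_on_cop_example(void)
{
    unsigned int old_core = switch_core(COP);
    do_cop_work();               /* Now executing on the COP */
    switch_core(old_core);       /* Migrate back to the original core */
}
#endif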
/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;
    int slot;

    /* CPU will initialize first and then sleep */
    slot = find_empty_thread_slot();

    if (slot >= MAXTHREADS)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zero'ed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }

    /* Initialize initially non-zero members of core */
    thread_queue_init(&cores[core].waking);
    cores[core].next_tmo_check = current_tick; /* Something not in the past */
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[core].highest_priority = LOWEST_PRIORITY;
#endif

    /* Initialize initially non-zero members of slot */
    thread = &threads[slot];
    thread->name = main_thread_name;
    UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); /* No sync worries yet */
#if NUM_CORES > 1
    thread->core = core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority = PRIORITY_USER_INTERFACE;
    thread->priority_x = LOWEST_PRIORITY;
#endif
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&thread->cl);
#endif

    add_to_list_l(&cores[core].running, thread);

    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (int)stackend - (int)stackbegin;
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
        /* TODO: HAL interface for this */
        /* Wake up coprocessor and let it initialize kernel and threads */
        COP_CTL = PROC_WAKE;
        /* Sleep until finished */
        CPU_CTL = PROC_SLEEP;
    }
    else
    {
        /* Initial stack is the COP idle stack */
        thread->stack = cop_idlestackbegin;
        thread->stack_size = IDLE_STACK_SIZE;
        /* Get COP safely primed inside switch_thread where it will remain
         * until a thread actually exists on it */
        CPU_CTL = PROC_WAKE;
        remove_thread(NULL);
#endif /* NUM_CORES */
    }
}
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 * overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int *stackptr = thread->stack;
    int stack_words = thread->stack_size / sizeof (int);
    int i, usage = 0;

    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    unsigned int *stackptr = idle_stacks[core];
    int i, usage = 0;

    for (i = 0; i < IDLE_STACK_WORDS; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
            break;
        }
    }

    return usage;
}
#endif /* NUM_CORES > 1 */
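
/* Illustrative sketch, kept out of the build with #if 0: reporting how much
 * of a thread's stack has ever been touched.  DEBUGF is assumed to be the
 * usual firmware debug output macro. */
#if 0
static void report_stack_example(const struct thread_entry *t)
{
    DEBUGF("stack usage: %d%%\n", thread_stack_usage(t));
}
#endif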
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";
        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX";
        }
        snprintf(buffer, size, fmt, name);
    }
}
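
/* Illustrative sketch, kept out of the build with #if 0: formatting a
 * thread's name (or its slot address when unnamed) for a debug display.
 * The 32-byte buffer size and DEBUGF are example assumptions. */
#if 0
static void print_thread_example(struct thread_entry *t)
{
    char namebuf[32];
    thread_get_name(namebuf, sizeof(namebuf), t);
    DEBUGF("%s\n", namebuf);
}
#endif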