Implement core_sleep() for TCC780x (single core).
[kugel-rb.git] / firmware / thread.c
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Ulf Ralberg
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
19 #include "config.h"
20 #include <stdbool.h>
21 #include "thread.h"
22 #include "panic.h"
23 #include "sprintf.h"
24 #include "system.h"
25 #include "kernel.h"
26 #include "cpu.h"
27 #include "string.h"
28 #ifdef RB_PROFILE
29 #include <profile.h>
30 #endif
32 /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
33 #ifdef DEBUG
34 #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
35 #else
36 #define THREAD_EXTRA_CHECKS 0
37 #endif
39 /**
40 * General locking order to guarantee progress. The order must be observed,
41 * but not all stages are necessarily obligatory. Going from 1) to 3) is
42 * perfectly legal.
44 * 1) IRQ
45 * This is first because of the likelihood of having an interrupt occur that
46 * also accesses one of the objects farther down the list. Any non-blocking
47 * synchronization done may already have a lock on something during normal
48 * execution and if an interrupt handler running on the same processor as
49 * the one that has the resource locked were to attempt to access the
50 * resource, the interrupt handler would spin forever waiting for an unlock
51 * that will never happen. There is no danger if the interrupt occurs on
52 * a different processor because the one that has the lock will eventually
53 * unlock and the other processor's handler may proceed at that time. Not
54 * necessary when the resource in question is definitely not available to
55 * interrupt handlers.
57 * 2) Kernel Object
58 * Stage 1) may be needed beforehand if the kernel object allows dual-use such as
59 * event queues. The kernel object must have a scheme to protect itself from
60 * access by another processor and is responsible for serializing the calls
61 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
62 * other. If a thread blocks on an object it must fill-in the blk_ops members
63 * for its core to unlock _after_ the thread's context has been saved and the
64 * unlocking will be done in reverse of this hierarchy.
66 * 3) Thread Slot
67 * This locks access to the thread's slot such that its state cannot be
68 * altered by another processor when a state change is in progress such as
69 * when it is in the process of going on a blocked list. An attempt to wake
70 * a thread while it is still blocking will likely desync its state with
71 * the other resources used for that state.
73 * 4) Lists
74 * Usually referring to a list (aka. queue) that a thread will be blocking
75 * on that belongs to some object and is shareable amongst multiple
76 * processors. Parts of the scheduler may have access to them without actually
77 * locking the kernel object such as when a thread is blocked with a timeout
78 * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
79 * its lists locked when the thread blocks so that all object list access is
80 * synchronized. Failure to do so would corrupt the list links.
82 * 5) Core Lists
83 * These lists are specific to a particular processor core and are accessible
84 * by all processor cores and interrupt handlers. They are used when an
85 * operation may only be performed by the thread's own core in a normal
86 * execution context. The wakeup list is the prime example where a thread
87 * may be added by any means and the thread's own core will remove it from
88 * the wakeup list and put it on the running list (which is only ever
89 * accessible by its own processor).
90 */
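/* Illustrative sketch only, not built: one possible nesting that follows the
 * locking order above for a hypothetical kernel object that owns a corelock
 * and a queue of blocked threads. The struct and function names are made up
 * for illustration; real call sites use the macros and helpers defined later
 * in this file, and unlocking proceeds in reverse order. */
#if 0
struct example_object
{
    struct corelock cl;          /* 2) kernel object lock */
    struct thread_queue queue;   /* 4) list of blocked threads */
};

static void example_lock_order(struct example_object *obj,
                               struct thread_entry *thread)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);  /* 1) IRQ */
    corelock_lock(&obj->cl);                          /* 2) kernel object */
    unsigned state = GET_THREAD_STATE(thread);        /* 3) thread slot */
    struct thread_entry *t = LOCK_LIST(&obj->queue);  /* 4) list */

    /* ... manipulate the thread state and the object's blocked list ... */

    UNLOCK_LIST(&obj->queue, t);                      /* unlock in reverse */
    UNLOCK_THREAD(thread, state);
    corelock_unlock(&obj->cl);
    set_irq_level(oldlevel);
}
#endif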
91 #define DEADBEEF ((unsigned int)0xdeadbeef)
92 /* Cast to the machine int type, whose size could be < 4. */
93 struct core_entry cores[NUM_CORES] IBSS_ATTR;
94 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
96 static const char main_thread_name[] = "main";
97 extern int stackbegin[];
98 extern int stackend[];
100 /* core_sleep procedure to implement for any CPU to ensure an asynchronous wakeup
101 * never results in requiring a wait until the next tick (up to 10000uS!). May
102 * require assembly and careful instruction ordering.
104 * 1) On multicore, stay awake if directed to do so by another core. If so, go to step 4.
105 * 2) If processor requires, atomically reenable interrupts and perform step 3.
106 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
107 * goto step 5.
108 * 4) Enable interrupts.
109 * 5) Exit procedure.
111 static inline void core_sleep(IF_COP_VOID(unsigned int core))
112 __attribute__((always_inline));
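/* Illustrative sketch only, not built: the recipe above expressed as pseudo-C.
 * other_core_wants_us_awake(), cpu_sleep_and_enable_irq(), cpu_sleep() and the
 * SLEEP_WAKE_ENABLES_INTERRUPTS switch are hypothetical placeholders for the
 * CPU-specific sequences implemented in the processor-specific section below. */
#if 0
static inline void core_sleep_template(IF_COP_VOID(unsigned int core))
{
#if NUM_CORES > 1
    if (other_core_wants_us_awake(core))   /* 1) directed to stay awake? */
    {
        set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); /* 4) */
        return;                            /* 5) */
    }
#endif
#ifdef SLEEP_WAKE_ENABLES_INTERRUPTS
    cpu_sleep_and_enable_irq();            /* 2) + 3): wakeup reenables ints, goto 5) */
#else
    cpu_sleep();                           /* 3) sleep the core */
    set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); /* 4) */
#endif
}                                          /* 5) exit */
#endif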
114 static void check_tmo_threads(void)
115 __attribute__((noinline));
117 static inline void block_thread_on_l(
118 struct thread_queue *list, struct thread_entry *thread, unsigned state)
119 __attribute__((always_inline));
121 static inline void block_thread_on_l_no_listlock(
122 struct thread_entry **list, struct thread_entry *thread, unsigned state)
123 __attribute__((always_inline));
125 static inline void _block_thread_on_l(
126 struct thread_queue *list, struct thread_entry *thread,
127 unsigned state IF_SWCL(, const bool single))
128 __attribute__((always_inline));
130 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
131 struct thread_queue *list IF_SWCL(, const bool nolock))
132 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
134 IF_SWCL(static inline) void _block_thread(
135 struct thread_queue *list IF_SWCL(, const bool nolock))
136 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
138 static void add_to_list_tmo(struct thread_entry *thread)
139 __attribute__((noinline));
141 static void core_schedule_wakeup(struct thread_entry *thread)
142 __attribute__((noinline));
144 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
145 __attribute__((always_inline));
147 #if NUM_CORES > 1
148 static inline void run_blocking_ops(
149 unsigned int core, struct thread_entry *thread)
150 __attribute__((always_inline));
151 #endif
153 static void thread_stkov(struct thread_entry *thread)
154 __attribute__((noinline));
156 static inline void store_context(void* addr)
157 __attribute__((always_inline));
159 static inline void load_context(const void* addr)
160 __attribute__((always_inline));
162 void switch_thread(struct thread_entry *old)
163 __attribute__((noinline));
166 /****************************************************************************
167 * Processor-specific section
170 #if defined(CPU_ARM)
171 /*---------------------------------------------------------------------------
172 * Start the thread running and terminate it if it returns
173 *---------------------------------------------------------------------------
175 static void start_thread(void) __attribute__((naked,used));
176 static void start_thread(void)
178 /* r0 = context */
179 asm volatile (
180 "ldr sp, [r0, #32] \n" /* Load initial sp */
181 "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
182 "mov r1, #0 \n" /* Mark thread as running */
183 "str r1, [r0, #40] \n"
184 #if NUM_CORES > 1
185 "ldr r0, =invalidate_icache \n" /* Invalidate this core's cache. */
186 "mov lr, pc \n" /* This could be the first entry into */
187 "bx r0 \n" /* plugin or codec code for this core. */
188 #endif
189 "mov lr, pc \n" /* Call thread function */
190 "bx r4 \n"
191 "mov r0, #0 \n" /* remove_thread(NULL) */
192 "ldr pc, =remove_thread \n"
193 ".ltorg \n" /* Dump constant pool */
194 ); /* No clobber list - new thread doesn't care */
197 /* For startup, place context pointer in r4 slot, start_thread pointer in r5
198 * slot, and thread function pointer in context.start. See load_context for
199 * what happens when thread is initially going to run. */
200 #define THREAD_STARTUP_INIT(core, thread, function) \
201 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
202 (thread)->context.r[1] = (unsigned int)start_thread, \
203 (thread)->context.start = (void *)function; })
205 /*---------------------------------------------------------------------------
206 * Store non-volatile context.
207 *---------------------------------------------------------------------------
209 static inline void store_context(void* addr)
211 asm volatile(
212 "stmia %0, { r4-r11, sp, lr } \n"
213 : : "r" (addr)
217 /*---------------------------------------------------------------------------
218 * Load non-volatile context.
219 *---------------------------------------------------------------------------
221 static inline void load_context(const void* addr)
223 asm volatile(
224 "ldr r0, [%0, #40] \n" /* Load start pointer */
225 "cmp r0, #0 \n" /* Check for NULL */
226 "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
227 "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
228 : : "r" (addr) : "r0" /* only! */
232 #if defined (CPU_PP)
234 #if NUM_CORES > 1
235 extern int cpu_idlestackbegin[];
236 extern int cpu_idlestackend[];
237 extern int cop_idlestackbegin[];
238 extern int cop_idlestackend[];
239 static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
241 [CPU] = cpu_idlestackbegin,
242 [COP] = cop_idlestackbegin
245 #if CONFIG_CPU == PP5002
246 /* Bytes to emulate the PP502x mailbox bits */
247 struct core_semaphores
249 volatile uint8_t intend_wake; /* 00h */
250 volatile uint8_t stay_awake; /* 01h */
251 volatile uint8_t intend_sleep; /* 02h */
252 volatile uint8_t unused; /* 03h */
255 static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR;
256 #endif
258 #endif /* NUM_CORES */
260 #if CONFIG_CORELOCK == SW_CORELOCK
261 /* Software core locks using Peterson's mutual exclusion algorithm */
263 /*---------------------------------------------------------------------------
264 * Initialize the corelock structure.
265 *---------------------------------------------------------------------------
267 void corelock_init(struct corelock *cl)
269 memset(cl, 0, sizeof (*cl));
272 #if 1 /* Assembly locks to minimize overhead */
273 /*---------------------------------------------------------------------------
274 * Wait for the corelock to become free and acquire it when it does.
275 *---------------------------------------------------------------------------
277 void corelock_lock(struct corelock *cl) __attribute__((naked));
278 void corelock_lock(struct corelock *cl)
280 asm volatile (
281 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
282 "ldrb r1, [r1] \n"
283 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
284 "and r2, r1, #1 \n" /* r2 = othercore */
285 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
286 "1: \n"
287 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 0 ? */
288 "cmp r3, #0 \n"
289 "ldrneb r3, [r0, #2] \n" /* || cl->turn == core ? */
290 "cmpne r3, r1, lsr #7 \n"
291 "bxeq lr \n" /* yes? lock acquired */
292 "b 1b \n" /* keep trying */
293 : : "i"(&PROCESSOR_ID)
295 (void)cl;
298 /*---------------------------------------------------------------------------
299 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
300 *---------------------------------------------------------------------------
302 int corelock_try_lock(struct corelock *cl) __attribute__((naked));
303 int corelock_try_lock(struct corelock *cl)
305 asm volatile (
306 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
307 "ldrb r1, [r1] \n"
308 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
309 "and r2, r1, #1 \n" /* r2 = othercore */
310 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
311 "1: \n"
312 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 0 ? */
313 "cmp r3, #0 \n"
314 "ldrneb r3, [r0, #2] \n" /* || cl->turn == core? */
315 "cmpne r3, r1, lsr #7 \n"
316 "moveq r0, #1 \n" /* yes? lock acquired */
317 "bxeq lr \n"
318 "mov r2, #0 \n" /* cl->myl[core] = 0 */
319 "strb r2, [r0, r1, lsr #7] \n"
320 "mov r0, r2 \n"
321 "bx lr \n" /* acquisition failed */
322 : : "i"(&PROCESSOR_ID)
325 return 0;
326 (void)cl;
329 /*---------------------------------------------------------------------------
330 * Release ownership of the corelock
331 *---------------------------------------------------------------------------
333 void corelock_unlock(struct corelock *cl) __attribute__((naked));
334 void corelock_unlock(struct corelock *cl)
336 asm volatile (
337 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
338 "ldrb r1, [r1] \n"
339 "mov r2, #0 \n" /* cl->myl[core] = 0 */
340 "strb r2, [r0, r1, lsr #7] \n"
341 "bx lr \n"
342 : : "i"(&PROCESSOR_ID)
344 (void)cl;
346 #else /* C versions for reference */
347 /*---------------------------------------------------------------------------
348 * Wait for the corelock to become free and acquire it when it does.
349 *---------------------------------------------------------------------------
351 void corelock_lock(struct corelock *cl)
353 const unsigned int core = CURRENT_CORE;
354 const unsigned int othercore = 1 - core;
356 cl->myl[core] = core;
357 cl->turn = othercore;
359 for (;;)
361 if (cl->myl[othercore] == 0 || cl->turn == core)
362 break;
366 /*---------------------------------------------------------------------------
367 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
368 *---------------------------------------------------------------------------
370 int corelock_try_lock(struct corelock *cl)
372 const unsigned int core = CURRENT_CORE;
373 const unsigned int othercore = 1 - core;
375 cl->myl[core] = core;
376 cl->turn = othercore;
378 if (cl->myl[othercore] == 0 || cl->turn == core)
380 return 1;
383 cl->myl[core] = 0;
384 return 0;
387 /*---------------------------------------------------------------------------
388 * Release ownership of the corelock
389 *---------------------------------------------------------------------------
391 void corelock_unlock(struct corelock *cl)
393 cl->myl[CURRENT_CORE] = 0;
395 #endif /* ASM / C selection */
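/* Illustrative sketch only, not built: how a caller might guard a small piece
 * of shared state with one of these corelocks. 'example_shared' and the two
 * functions are made up for illustration; corelock_init() must have been
 * called on the lock once, and IRQ handling is left to the caller as per the
 * locking order notes at the top of this file. */
#if 0
static struct
{
    struct corelock cl;
    int value;
} example_shared NOCACHEBSS_ATTR;

static void example_shared_increment(void)
{
    corelock_lock(&example_shared.cl);    /* spin until the other core is out */
    example_shared.value++;               /* critical section */
    corelock_unlock(&example_shared.cl);
}

static bool example_shared_try_increment(void)
{
    if (!corelock_try_lock(&example_shared.cl))
        return false;                     /* other core holds it - don't wait */
    example_shared.value++;
    corelock_unlock(&example_shared.cl);
    return true;
}
#endif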
397 #endif /* CONFIG_CORELOCK == SW_CORELOCK */
399 /*---------------------------------------------------------------------------
400 * Put core in a power-saving state if waking list wasn't repopulated and if
401 * no other core requested a wakeup for it to perform a task.
402 *---------------------------------------------------------------------------
404 #if NUM_CORES == 1
405 /* Shared single-core build debugging version */
406 static inline void core_sleep(void)
408 PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
409 nop; nop; nop;
410 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
412 #elif defined (CPU_PP502x)
413 static inline void core_sleep(unsigned int core)
415 #if 1
416 asm volatile (
417 "mov r0, #4 \n" /* r0 = 0x4 << core */
418 "mov r0, r0, lsl %[c] \n"
419 "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
420 "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
421 "tst r1, r0, lsl #2 \n"
422 "moveq r1, #0x80000000 \n" /* Then sleep */
423 "streq r1, [%[ctl], %[c], lsl #2] \n"
424 "moveq r1, #0 \n" /* Clear control reg */
425 "streq r1, [%[ctl], %[c], lsl #2] \n"
426 "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
427 "str r1, [%[mbx], #8] \n"
428 "1: \n" /* Wait for wake procedure to finish */
429 "ldr r1, [%[mbx], #0] \n"
430 "tst r1, r0, lsr #2 \n"
431 "bne 1b \n"
432 "mrs r1, cpsr \n" /* Enable interrupts */
433 "bic r1, r1, #0xc0 \n"
434 "msr cpsr_c, r1 \n"
436 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core)
437 : "r0", "r1");
438 #else /* C version for reference */
439 /* Signal intent to sleep */
440 MBX_MSG_SET = 0x4 << core;
442 /* Something waking or other processor intends to wake us? */
443 if ((MBX_MSG_STAT & (0x10 << core)) == 0)
445 PROC_CTL(core) = PROC_SLEEP; nop; /* Snooze */
446 PROC_CTL(core) = 0; /* Clear control reg */
449 /* Signal wake - clear wake flag */
450 MBX_MSG_CLR = 0x14 << core;
452 /* Wait for other processor to finish wake procedure */
453 while (MBX_MSG_STAT & (0x1 << core));
455 /* Enable IRQ, FIQ */
456 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
457 #endif /* ASM/C selection */
459 #elif CONFIG_CPU == PP5002
460 /* PP5002 has no mailboxes - emulate using bytes */
461 static inline void core_sleep(unsigned int core)
463 #if 1
464 asm volatile (
465 "mov r0, #1 \n" /* Signal intent to sleep */
466 "strb r0, [%[sem], #2] \n"
467 "ldrb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
468 "cmp r0, #0 \n"
469 "bne 2f \n"
470 /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
471 * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
472 * that the correct alternative is executed. Don't change the order
473 * of the next 4 instructions! */
474 "tst pc, #0x0c \n"
475 "mov r0, #0xca \n"
476 "strne r0, [%[ctl], %[c], lsl #2] \n"
477 "streq r0, [%[ctl], %[c], lsl #2] \n"
478 "nop \n" /* nop's needed because of pipeline */
479 "nop \n"
480 "nop \n"
481 "2: \n"
482 "mov r0, #0 \n" /* Clear stay_awake and sleep intent */
483 "strb r0, [%[sem], #1] \n"
484 "strb r0, [%[sem], #2] \n"
485 "1: \n" /* Wait for wake procedure to finish */
486 "ldrb r0, [%[sem], #0] \n"
487 "cmp r0, #0 \n"
488 "bne 1b \n"
489 "mrs r0, cpsr \n" /* Enable interrupts */
490 "bic r0, r0, #0xc0 \n"
491 "msr cpsr_c, r0 \n"
493 : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
494 [ctl]"r"(&PROC_CTL(CPU))
495 : "r0"
497 #else /* C version for reference */
498 /* Signal intent to sleep */
499 core_semaphores[core].intend_sleep = 1;
501 /* Something waking or other processor intends to wake us? */
502 if (core_semaphores[core].stay_awake == 0)
504 PROC_CTL(core) = PROC_SLEEP; /* Snooze */
505 nop; nop; nop;
508 /* Signal wake - clear wake flag */
509 core_semaphores[core].stay_awake = 0;
510 core_semaphores[core].intend_sleep = 0;
512 /* Wait for other processor to finish wake procedure */
513 while (core_semaphores[core].intend_wake != 0);
515 /* Enable IRQ, FIQ */
516 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
517 #endif /* ASM/C selection */
519 #endif /* CPU type */
521 /*---------------------------------------------------------------------------
522 * Wake another processor core that is sleeping or prevent it from doing so
523 * if it was already intending to do so. FIQ and IRQ should be disabled before calling.
524 *---------------------------------------------------------------------------
526 #if NUM_CORES == 1
527 /* Shared single-core build debugging version */
528 void core_wake(void)
530 /* No wakey - core already wakey */
532 #elif defined (CPU_PP502x)
533 void core_wake(unsigned int othercore)
535 #if 1
536 /* avoid r0 since that contains othercore */
537 asm volatile (
538 "mrs r3, cpsr \n" /* Disable IRQ */
539 "orr r1, r3, #0x80 \n"
540 "msr cpsr_c, r1 \n"
541 "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
542 "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
543 "str r2, [%[mbx], #4] \n"
544 "1: \n" /* If it intends to sleep, let it first */
545 "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
546 "eor r1, r1, #0xc \n"
547 "tst r1, r2, lsr #2 \n"
548 "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
549 "tsteq r1, #0x80000000 \n"
550 "beq 1b \n" /* Wait for sleep or wake */
551 "tst r1, #0x80000000 \n" /* If sleeping, wake it */
552 "movne r1, #0x0 \n"
553 "strne r1, [%[ctl], %[oc], lsl #2] \n"
554 "mov r1, r2, lsr #4 \n"
555 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
556 "msr cpsr_c, r3 \n" /* Restore int status */
558 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
559 [oc]"r"(othercore)
560 : "r1", "r2", "r3");
561 #else /* C version for reference */
562 /* Disable interrupts - avoid reentrancy from the tick */
563 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
565 /* Signal intent to wake other processor - set stay awake */
566 MBX_MSG_SET = 0x11 << othercore;
568 /* If it intends to sleep, wait until it does or aborts */
569 while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
570 (PROC_CTL(othercore) & PROC_SLEEP) == 0);
572 /* If sleeping, wake it up */
573 if (PROC_CTL(othercore) & PROC_SLEEP)
574 PROC_CTL(othercore) = 0;
576 /* Done with wake procedure */
577 MBX_MSG_CLR = 0x1 << othercore;
578 set_irq_level(oldlevel);
579 #endif /* ASM/C selection */
581 #elif CONFIG_CPU == PP5002
582 /* PP5002 has no mailboxes - emulate using bytes */
583 void core_wake(unsigned int othercore)
585 #if 1
586 /* avoid r0 since that contains othercore */
587 asm volatile (
588 "mrs r3, cpsr \n" /* Disable IRQ */
589 "orr r1, r3, #0x80 \n"
590 "msr cpsr_c, r1 \n"
591 "mov r1, #1 \n" /* Signal intent to wake other core */
592 "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
593 "strh r1, [%[sem], #0] \n"
594 "mov r2, #0x8000 \n"
595 "1: \n" /* If it intends to sleep, let it first */
596 "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
597 "cmp r1, #1 \n"
598 "ldr r1, [%[st]] \n" /* && not sleeping ? */
599 "tsteq r1, r2, lsr %[oc] \n"
600 "beq 1b \n" /* Wait for sleep or wake */
601 "tst r1, r2, lsr %[oc] \n"
602 "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
603 "movne r1, #0xce \n"
604 "strne r1, [r2, %[oc], lsl #2] \n"
605 "mov r1, #0 \n" /* Done with wake procedure */
606 "strb r1, [%[sem], #0] \n"
607 "msr cpsr_c, r3 \n" /* Restore int status */
609 : [sem]"r"(&core_semaphores[othercore]),
610 [st]"r"(&PROC_STAT),
611 [oc]"r"(othercore)
612 : "r1", "r2", "r3"
614 #else /* C version for reference */
615 /* Disable interrupts - avoid reentrancy from the tick */
616 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
618 /* Signal intent to wake other processor - set stay awake */
619 core_semaphores[othercore].intend_wake = 1;
620 core_semaphores[othercore].stay_awake = 1;
622 /* If it intends to sleep, wait until it does or aborts */
623 while (core_semaphores[othercore].intend_sleep != 0 &&
624 (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
626 /* If sleeping, wake it up */
627 if (PROC_STAT & PROC_SLEEPING(othercore))
628 PROC_CTL(othercore) = PROC_WAKE;
630 /* Done with wake procedure */
631 core_semaphores[othercore].intend_wake = 0;
632 set_irq_level(oldlevel);
633 #endif /* ASM/C selection */
635 #endif /* CPU type */
637 #if NUM_CORES > 1
638 /*---------------------------------------------------------------------------
639 * Switches to a stack that always resides in the Rockbox core.
641 * Needed when a thread suicides on a core other than the main CPU since the
642 * stack used when idling is the stack of the last thread to run. This stack
643 * may not reside in the Rockbox core, in which case the core will continue to use a
644 * stack from an unloaded module until another thread runs on it.
645 *---------------------------------------------------------------------------
647 static inline void switch_to_idle_stack(const unsigned int core)
649 asm volatile (
650 "str sp, [%0] \n" /* save original stack pointer on idle stack */
651 "mov sp, %0 \n" /* switch stacks */
652 : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
653 (void)core;
656 /*---------------------------------------------------------------------------
657 * Perform core switch steps that need to take place inside switch_thread.
659 * These steps must take place before changing the processor and after
660 * having entered switch_thread since switch_thread may not do a normal return
661 * because the stack being used for anything the compiler saved will not belong
662 * to the thread's destination core and it may have been recycled for other
663 * purposes by the time a normal context load has taken place. switch_thread
664 * will also clobber anything stashed in the thread's context or stored in the
665 * nonvolatile registers if it is saved there before the call since the
666 * compiler's order of operations cannot be known for certain.
668 static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
670 /* Flush our data to ram */
671 flush_icache();
672 /* Stash thread in r4 slot */
673 thread->context.r[0] = (unsigned int)thread;
674 /* Stash restart address in r5 slot */
675 thread->context.r[1] = (unsigned int)thread->context.start;
676 /* Save sp in context.sp while still running on old core */
677 thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
680 /*---------------------------------------------------------------------------
681 * Machine-specific helper function for switching the processor a thread is
682 * running on. Basically, the thread suicides on the departing core and is
683 * reborn on the destination. Were it not for gcc's ill-behavior regarding
684 * naked functions written in C where it actually clobbers non-volatile
685 * registers before the intended prologue code, this would all be much
686 * simpler. Generic setup is done in switch_core itself.
689 /*---------------------------------------------------------------------------
690 * This actually performs the core switch.
692 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
693 __attribute__((naked));
694 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
696 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
697 * Stack access also isn't permitted until restoring the original stack and
698 * context. */
699 asm volatile (
700 "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */
701 "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
702 "ldr r2, [r2, r0, lsl #2] \n"
703 "add r2, r2, %0*4 \n"
704 "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
705 "mov sp, r2 \n" /* switch stacks */
706 "adr r2, 1f \n" /* r2 = new core restart address */
707 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
708 "mov r0, r1 \n" /* switch_thread(thread) */
709 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
710 "1: \n"
711 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
712 "mov r1, #0 \n" /* Clear start address */
713 "str r1, [r0, #40] \n"
714 "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */
715 "mov lr, pc \n"
716 "bx r0 \n"
717 "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */
718 ".ltorg \n" /* Dump constant pool */
719 : : "i"(IDLE_STACK_WORDS)
721 (void)core; (void)thread;
723 #endif /* NUM_CORES */
725 #elif CONFIG_CPU == S3C2440
727 /*---------------------------------------------------------------------------
728 * Put core in a power-saving state if waking list wasn't repopulated.
729 *---------------------------------------------------------------------------
731 static inline void core_sleep(void)
733 /* FIQ also changes the CLKCON register so FIQ must be disabled
734 when changing it here */
735 asm volatile (
736 "mrs r0, cpsr \n" /* Prepare IRQ, FIQ enable */
737 "bic r0, r0, #0xc0 \n"
738 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
739 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
740 "orr r2, r2, #4 \n"
741 "str r2, [r1, #0xc] \n"
742 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
743 "mov r2, #0 \n" /* wait for IDLE */
744 "1: \n"
745 "add r2, r2, #1 \n"
746 "cmp r2, #10 \n"
747 "bne 1b \n"
748 "orr r2, r0, #0xc0 \n" /* Disable IRQ, FIQ */
749 "msr cpsr_c, r2 \n"
750 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
751 "bic r2, r2, #4 \n"
752 "str r2, [r1, #0xc] \n"
753 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
754 : : : "r0", "r1", "r2");
756 #elif defined(CPU_TCC77X)
757 static inline void core_sleep(void)
759 #warning TODO: Implement core_sleep
761 #elif defined(CPU_TCC780X)
762 static inline void core_sleep(void)
764 /* Single core only for now. Use the generic ARMv5 wait for IRQ */
765 asm volatile (
766 "mov r0, #0 \n"
767 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
768 "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */
769 "bic r0, r0, #0xc0 \n"
770 "msr cpsr_c, r0 \n"
771 : : : "r0"
774 #elif CONFIG_CPU == IMX31L
775 static inline void core_sleep(void)
777 asm volatile (
778 "mov r0, #0 \n"
779 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
780 "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */
781 "bic r0, r0, #0xc0 \n"
782 "msr cpsr_c, r0 \n"
783 : : : "r0"
786 #else
787 static inline void core_sleep(void)
789 #warning core_sleep not implemented, battery life will be decreased
791 #endif /* CONFIG_CPU == */
793 #elif defined(CPU_COLDFIRE)
794 /*---------------------------------------------------------------------------
795 * Start the thread running and terminate it if it returns
796 *---------------------------------------------------------------------------
798 void start_thread(void); /* Provide C access to ASM label */
799 static void __start_thread(void) __attribute__((used));
800 static void __start_thread(void)
802 /* a0=macsr, a1=context */
803 asm volatile (
804 "start_thread: \n" /* Start here - no naked attribute */
805 "move.l %a0, %macsr \n" /* Set initial mac status reg */
806 "lea.l 48(%a1), %a1 \n"
807 "move.l (%a1)+, %sp \n" /* Set initial stack */
808 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
809 "clr.l (%a1) \n" /* Mark thread running */
810 "jsr (%a2) \n" /* Call thread function */
811 "clr.l -(%sp) \n" /* remove_thread(NULL) */
812 "jsr remove_thread \n"
816 /* Set EMAC unit to fractional mode with saturation for each new thread,
817 * since that's what will be most useful for most things the DSP will do.
818 * Codecs should still initialize their preferred modes
819 * explicitly. Context pointer is placed in d2 slot and start_thread
820 * pointer in d3 slot. thread function pointer is placed in context.start.
821 * See load_context for what happens when thread is initially going to
822 * run.
824 #define THREAD_STARTUP_INIT(core, thread, function) \
825 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
826 (thread)->context.d[0] = (unsigned int)&(thread)->context, \
827 (thread)->context.d[1] = (unsigned int)start_thread, \
828 (thread)->context.start = (void *)(function); })
830 /*---------------------------------------------------------------------------
831 * Store non-volatile context.
832 *---------------------------------------------------------------------------
834 static inline void store_context(void* addr)
836 asm volatile (
837 "move.l %%macsr,%%d0 \n"
838 "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
839 : : "a" (addr) : "d0" /* only! */
843 /*---------------------------------------------------------------------------
844 * Load non-volatile context.
845 *---------------------------------------------------------------------------
847 static inline void load_context(const void* addr)
849 asm volatile (
850 "move.l 52(%0), %%d0 \n" /* Get start address */
851 "beq.b 1f \n" /* NULL -> already running */
852 "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
853 "jmp (%%a2) \n" /* Start the thread */
854 "1: \n"
855 "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
856 "move.l %%d0, %%macsr \n"
857 : : "a" (addr) : "d0" /* only! */
861 /*---------------------------------------------------------------------------
862 * Put core in a power-saving state if waking list wasn't repopulated.
863 *---------------------------------------------------------------------------
865 static inline void core_sleep(void)
867 /* Supervisor mode, interrupts enabled upon wakeup */
868 asm volatile ("stop #0x2000");
871 #elif CONFIG_CPU == SH7034
872 /*---------------------------------------------------------------------------
873 * Start the thread running and terminate it if it returns
874 *---------------------------------------------------------------------------
876 void start_thread(void); /* Provide C access to ASM label */
877 static void __start_thread(void) __attribute__((used));
878 static void __start_thread(void)
880 /* r8 = context */
881 asm volatile (
882 "_start_thread: \n" /* Start here - no naked attribute */
883 "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
884 "mov.l @(28, r8), r15 \n" /* Set initial sp */
885 "mov #0, r1 \n" /* Start the thread */
886 "jsr @r0 \n"
887 "mov.l r1, @(36, r8) \n" /* Clear start address */
888 "mov.l 1f, r0 \n" /* remove_thread(NULL) */
889 "jmp @r0 \n"
890 "mov #0, r4 \n"
891 "1: \n"
892 ".long _remove_thread \n"
896 /* Place context pointer in r8 slot, function pointer in r9 slot, and
897 * start_thread pointer in context_start */
898 #define THREAD_STARTUP_INIT(core, thread, function) \
899 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
900 (thread)->context.r[1] = (unsigned int)(function), \
901 (thread)->context.start = (void*)start_thread; })
903 /*---------------------------------------------------------------------------
904 * Store non-volatile context.
905 *---------------------------------------------------------------------------
907 static inline void store_context(void* addr)
909 asm volatile (
910 "add #36, %0 \n" /* Start at last reg. By the time routine */
911 "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
912 "mov.l r15,@-%0 \n"
913 "mov.l r14,@-%0 \n"
914 "mov.l r13,@-%0 \n"
915 "mov.l r12,@-%0 \n"
916 "mov.l r11,@-%0 \n"
917 "mov.l r10,@-%0 \n"
918 "mov.l r9, @-%0 \n"
919 "mov.l r8, @-%0 \n"
920 : : "r" (addr)
924 /*---------------------------------------------------------------------------
925 * Load non-volatile context.
926 *---------------------------------------------------------------------------
928 static inline void load_context(const void* addr)
930 asm volatile (
931 "mov.l @(36, %0), r0 \n" /* Get start address */
932 "tst r0, r0 \n"
933 "bt .running \n" /* NULL -> already running */
934 "jmp @r0 \n" /* r8 = context */
935 ".running: \n"
936 "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
937 "mov.l @%0+, r9 \n"
938 "mov.l @%0+, r10 \n"
939 "mov.l @%0+, r11 \n"
940 "mov.l @%0+, r12 \n"
941 "mov.l @%0+, r13 \n"
942 "mov.l @%0+, r14 \n"
943 "mov.l @%0+, r15 \n"
944 "lds.l @%0+, pr \n"
945 : : "r" (addr) : "r0" /* only! */
949 /*---------------------------------------------------------------------------
950 * Put core in a power-saving state if waking list wasn't repopulated.
951 *---------------------------------------------------------------------------
953 static inline void core_sleep(void)
955 asm volatile (
956 "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
957 "mov #0, r1 \n" /* Enable interrupts */
958 "ldc r1, sr \n" /* Following instruction cannot be interrupted */
959 "sleep \n" /* Execute standby */
960 : : "z"(&SBYCR-GBR) : "r1");
963 #endif /* CONFIG_CPU == */
966 * End Processor-specific section
967 ***************************************************************************/
969 #if THREAD_EXTRA_CHECKS
970 static void thread_panicf(const char *msg, struct thread_entry *thread)
972 #if NUM_CORES > 1
973 const unsigned int core = thread->core;
974 #endif
975 static char name[32];
976 thread_get_name(name, 32, thread);
977 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
979 static void thread_stkov(struct thread_entry *thread)
981 thread_panicf("Stkov", thread);
983 #define THREAD_PANICF(msg, thread) \
984 thread_panicf(msg, thread)
985 #define THREAD_ASSERT(exp, msg, thread) \
986 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
987 #else
988 static void thread_stkov(struct thread_entry *thread)
990 #if NUM_CORES > 1
991 const unsigned int core = thread->core;
992 #endif
993 static char name[32];
994 thread_get_name(name, 32, thread);
995 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
997 #define THREAD_PANICF(msg, thread)
998 #define THREAD_ASSERT(exp, msg, thread)
999 #endif /* THREAD_EXTRA_CHECKS */
1001 /*---------------------------------------------------------------------------
1002 * Lock a list pointer and return its value
1003 *---------------------------------------------------------------------------
1005 #if CONFIG_CORELOCK == SW_CORELOCK
1006 /* Separate locking function versions */
1008 /* Thread locking */
1009 #define GET_THREAD_STATE(thread) \
1010 ({ corelock_lock(&(thread)->cl); (thread)->state; })
1011 #define TRY_GET_THREAD_STATE(thread) \
1012 ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
1013 #define UNLOCK_THREAD(thread, state) \
1014 ({ corelock_unlock(&(thread)->cl); })
1015 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1016 ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })
1018 /* List locking */
1019 #define LOCK_LIST(tqp) \
1020 ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
1021 #define UNLOCK_LIST(tqp, mod) \
1022 ({ corelock_unlock(&(tqp)->cl); })
1023 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1024 ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })
1026 /* Select the queue pointer directly */
1027 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1028 ({ add_to_list_l(&(tqp)->queue, (thread)); })
1029 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1030 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
1032 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1033 /* Native swap/exchange versions */
1035 /* Thread locking */
1036 #define GET_THREAD_STATE(thread) \
1037 ({ unsigned _s; \
1038 while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
1039 _s; })
1040 #define TRY_GET_THREAD_STATE(thread) \
1041 ({ xchg8(&(thread)->state, STATE_BUSY); })
1042 #define UNLOCK_THREAD(thread, _state) \
1043 ({ (thread)->state = (_state); })
1044 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1045 ({ (thread)->state = (_state); })
1047 /* List locking */
1048 #define LOCK_LIST(tqp) \
1049 ({ struct thread_entry *_l; \
1050 while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
1051 _l; })
1052 #define UNLOCK_LIST(tqp, mod) \
1053 ({ (tqp)->queue = (mod); })
1054 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1055 ({ (tqp)->queue = (mod); })
1057 /* Select the local queue pointer copy returned from LOCK_LIST */
1058 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1059 ({ add_to_list_l(&(tc), (thread)); })
1060 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1061 ({ remove_from_list_l(&(tc), (thread)); })
1063 #else
1064 /* Single-core/non-locked versions */
1066 /* Threads */
1067 #define GET_THREAD_STATE(thread) \
1068 ({ (thread)->state; })
1069 #define UNLOCK_THREAD(thread, _state)
1070 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1071 ({ (thread)->state = (_state); })
1073 /* Lists */
1074 #define LOCK_LIST(tqp) \
1075 ({ (tqp)->queue; })
1076 #define UNLOCK_LIST(tqp, mod)
1077 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1078 ({ (tqp)->queue = (mod); })
1080 /* Select the queue pointer directly */
1081 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1082 ({ add_to_list_l(&(tqp)->queue, (thread)); })
1083 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1084 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
1086 #endif /* locking selection */
1088 #if THREAD_EXTRA_CHECKS
1089 /*---------------------------------------------------------------------------
1090 * Lock the thread slot to obtain the state and then unlock it. Waits for
1091 * it not to be busy. Used for debugging.
1092 *---------------------------------------------------------------------------
1094 static unsigned peek_thread_state(struct thread_entry *thread)
1096 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1097 unsigned state = GET_THREAD_STATE(thread);
1098 UNLOCK_THREAD(thread, state);
1099 set_irq_level(oldlevel);
1100 return state;
1102 #endif /* THREAD_EXTRA_CHECKS */
1104 /*---------------------------------------------------------------------------
1105 * Adds a thread to a list of threads using "insert last". Uses the "l"
1106 * links.
1107 *---------------------------------------------------------------------------
1109 static void add_to_list_l(struct thread_entry **list,
1110 struct thread_entry *thread)
1112 struct thread_entry *l = *list;
1114 if (l == NULL)
1116 /* Insert into unoccupied list */
1117 thread->l.next = thread;
1118 thread->l.prev = thread;
1119 *list = thread;
1120 return;
1123 /* Insert last */
1124 thread->l.next = l;
1125 thread->l.prev = l->l.prev;
1126 thread->l.prev->l.next = thread;
1127 l->l.prev = thread;
1129 /* Insert next
1130 thread->l.next = l->l.next;
1131 thread->l.prev = l;
1132 thread->l.next->l.prev = thread;
1133 l->l.next = thread;
1137 /*---------------------------------------------------------------------------
1138 * Locks a list, adds the thread entry and unlocks the list on multicore.
1139 * Defined as add_to_list_l on single-core.
1140 *---------------------------------------------------------------------------
1142 #if NUM_CORES > 1
1143 static void add_to_list_l_locked(struct thread_queue *tq,
1144 struct thread_entry *thread)
1146 struct thread_entry *t = LOCK_LIST(tq);
1147 ADD_TO_LIST_L_SELECT(t, tq, thread);
1148 UNLOCK_LIST(tq, t);
1149 (void)t;
1151 #else
1152 #define add_to_list_l_locked(tq, thread) \
1153 add_to_list_l(&(tq)->queue, (thread))
1154 #endif
1156 /*---------------------------------------------------------------------------
1157 * Removes a thread from a list of threads. Uses the "l" links.
1158 *---------------------------------------------------------------------------
1160 static void remove_from_list_l(struct thread_entry **list,
1161 struct thread_entry *thread)
1163 struct thread_entry *prev, *next;
1165 next = thread->l.next;
1167 if (thread == next)
1169 /* The only item */
1170 *list = NULL;
1171 return;
1174 if (thread == *list)
1176 /* List becomes next item */
1177 *list = next;
1180 prev = thread->l.prev;
1182 /* Fix links to jump over the removed entry. */
1183 prev->l.next = next;
1184 next->l.prev = prev;
1187 /*---------------------------------------------------------------------------
1188 * Locks a list, removes the thread entry and unlocks the list on multicore.
1189 * Defined as remove_from_list_l on single-core.
1190 *---------------------------------------------------------------------------
1192 #if NUM_CORES > 1
1193 static void remove_from_list_l_locked(struct thread_queue *tq,
1194 struct thread_entry *thread)
1196 struct thread_entry *t = LOCK_LIST(tq);
1197 REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
1198 UNLOCK_LIST(tq, t);
1199 (void)t;
1201 #else
1202 #define remove_from_list_l_locked(tq, thread) \
1203 remove_from_list_l(&(tq)->queue, (thread))
1204 #endif
1206 /*---------------------------------------------------------------------------
1207 * Add a thread to the core's timeout list by linking the pointers in its
1208 * tmo structure.
1209 *---------------------------------------------------------------------------
1211 static void add_to_list_tmo(struct thread_entry *thread)
1213 /* Insert first */
1214 struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;
1216 thread->tmo.prev = thread;
1217 thread->tmo.next = t;
1219 if (t != NULL)
1221 /* Fix second item's prev pointer to point to this thread */
1222 t->tmo.prev = thread;
1225 cores[IF_COP_CORE(thread->core)].timeout = thread;
1228 /*---------------------------------------------------------------------------
1229 * Remove a thread from the core's timeout list by unlinking the pointers in
1230 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
1231 * is cancelled.
1232 *---------------------------------------------------------------------------
1234 static void remove_from_list_tmo(struct thread_entry *thread)
1236 struct thread_entry *next = thread->tmo.next;
1237 struct thread_entry *prev;
1239 if (thread == cores[IF_COP_CORE(thread->core)].timeout)
1241 /* Next item becomes list head */
1242 cores[IF_COP_CORE(thread->core)].timeout = next;
1244 if (next != NULL)
1246 /* Fix new list head's prev to point to itself. */
1247 next->tmo.prev = next;
1250 thread->tmo.prev = NULL;
1251 return;
1254 prev = thread->tmo.prev;
1256 if (next != NULL)
1258 next->tmo.prev = prev;
1261 prev->tmo.next = next;
1262 thread->tmo.prev = NULL;
1265 /*---------------------------------------------------------------------------
1266 * Schedules a thread wakeup on the specified core. Threads will be made
1267 * ready to run when the next task switch occurs. Note that this does not
1268 * introduce an on-core delay since the soonest the next thread may run is
1269 * no sooner than that. Other cores and on-core interrupts may only ever
1270 * add to the list.
1271 *---------------------------------------------------------------------------
1273 static void core_schedule_wakeup(struct thread_entry *thread)
1275 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1276 const unsigned int core = IF_COP_CORE(thread->core);
1277 add_to_list_l_locked(&cores[core].waking, thread);
1278 #if NUM_CORES > 1
1279 if (core != CURRENT_CORE)
1281 core_wake(core);
1283 #endif
1284 set_irq_level(oldlevel);
1287 /*---------------------------------------------------------------------------
1288 * If the waking list was populated, move all threads on it onto the running
1289 * list so they may be run ASAP.
1290 *---------------------------------------------------------------------------
1292 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
1294 struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
1295 struct thread_entry *r = cores[IF_COP_CORE(core)].running;
1297 /* Transfer all threads on the waking list to the running list in one
1298 swoop */
1299 if (r != NULL)
1301 /* Place waking threads at the end of the running list. */
1302 struct thread_entry *tmp;
1303 w->l.prev->l.next = r;
1304 r->l.prev->l.next = w;
1305 tmp = r->l.prev;
1306 r->l.prev = w->l.prev;
1307 w->l.prev = tmp;
1309 else
1311 /* Just transfer the list as-is */
1312 cores[IF_COP_CORE(core)].running = w;
1314 /* Just leave any timeout threads on the timeout list. If a timeout check
1315 * is due, they will be removed there. If they do a timeout again before
1316 * being removed, they will just stay on the list with a new expiration
1317 * tick. */
1319 /* Waking list is clear - NULL and unlock it */
1320 UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
1323 /*---------------------------------------------------------------------------
1324 * Check the core's timeout list when at least one thread is due to wake.
1325 * Filtering for the condition is done before making the call. Resets the
1326 * tick at which the next check will occur.
1327 *---------------------------------------------------------------------------
1329 static void check_tmo_threads(void)
1331 const unsigned int core = CURRENT_CORE;
1332 const long tick = current_tick; /* snapshot the current tick */
1333 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1334 struct thread_entry *next = cores[core].timeout;
1336 /* If there are no processes waiting for a timeout, just keep the check
1337 tick from falling into the past. */
1338 if (next != NULL)
1340 /* Check sleeping threads. */
1343 /* Must make sure no one else is examining the state, wait until
1344 slot is no longer busy */
1345 struct thread_entry *curr = next;
1346 next = curr->tmo.next;
1348 unsigned state = GET_THREAD_STATE(curr);
1350 if (state < TIMEOUT_STATE_FIRST)
1352 /* Cleanup threads no longer on a timeout but still on the
1353 * list. */
1354 remove_from_list_tmo(curr);
1355 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1357 else if (TIME_BEFORE(tick, curr->tmo_tick))
1359 /* Timeout still pending - this will be the usual case */
1360 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1362 /* Earliest timeout found so far - move the next check up
1363 to its time */
1364 next_tmo_check = curr->tmo_tick;
1366 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1368 else
1370 /* Sleep timeout has been reached so bring the thread back to
1371 * life again. */
1372 if (state == STATE_BLOCKED_W_TMO)
1374 remove_from_list_l_locked(curr->bqp, curr);
1377 remove_from_list_tmo(curr);
1378 add_to_list_l(&cores[core].running, curr);
1379 UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
1382 /* Break the loop once we have walked through the list of all
1383 * sleeping processes or have removed them all. */
1385 while (next != NULL);
1388 cores[core].next_tmo_check = next_tmo_check;
1391 /*---------------------------------------------------------------------------
1392 * Performs operations that must be done before blocking a thread but after
1393 * the state is saved - follows reverse of locking order. Returns early
1394 * if blk_ops.flags is zero.
1395 *---------------------------------------------------------------------------
1397 #if NUM_CORES > 1
1398 static inline void run_blocking_ops(
1399 unsigned int core, struct thread_entry *thread)
1401 struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
1402 const unsigned flags = ops->flags;
1404 if (flags == 0)
1405 return;
1407 if (flags & TBOP_SWITCH_CORE)
1409 core_switch_blk_op(core, thread);
1412 #if CONFIG_CORELOCK == SW_CORELOCK
1413 if (flags & TBOP_UNLOCK_LIST)
1415 UNLOCK_LIST(ops->list_p, NULL);
1418 if (flags & TBOP_UNLOCK_CORELOCK)
1420 corelock_unlock(ops->cl_p);
1423 if (flags & TBOP_UNLOCK_THREAD)
1425 UNLOCK_THREAD(ops->thread, 0);
1427 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1428 /* Write updated variable value into memory location */
1429 switch (flags & TBOP_VAR_TYPE_MASK)
1431 case TBOP_UNLOCK_LIST:
1432 UNLOCK_LIST(ops->list_p, ops->list_v);
1433 break;
1434 case TBOP_SET_VARi:
1435 *ops->var_ip = ops->var_iv;
1436 break;
1437 case TBOP_SET_VARu8:
1438 *ops->var_u8p = ops->var_u8v;
1439 break;
1441 #endif /* CONFIG_CORELOCK == */
1443 /* Unlock thread's slot */
1444 if (flags & TBOP_UNLOCK_CURRENT)
1446 UNLOCK_THREAD(thread, ops->state);
1449 ops->flags = 0;
1451 #endif /* NUM_CORES > 1 */
1454 /*---------------------------------------------------------------------------
1455 * Runs any operations that may cause threads to be ready to run and then
1456 * sleeps the processor core until the next interrupt if none are ready.
1457 *---------------------------------------------------------------------------
1459 static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
1461 for (;;)
1463 set_irq_level(HIGHEST_IRQ_LEVEL);
1464 /* We want to do these ASAP as they may change the decision to sleep
1465 * the core, or the core may have woken because an interrupt occurred
1466 * and posted a message to a queue. */
1467 if (cores[IF_COP_CORE(core)].waking.queue != NULL)
1469 core_perform_wakeup(IF_COP(core));
1472 /* If there are threads on a timeout and the earliest wakeup is due,
1473 * check the list and wake any threads that need to start running
1474 * again. */
1475 if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
1477 check_tmo_threads();
1480 /* If there is a ready to run task, return its ID and keep core
1481 * awake. */
1482 if (cores[IF_COP_CORE(core)].running == NULL)
1484 /* Enter sleep mode to reduce power usage - woken up on interrupt
1485 * or wakeup request from another core - expected to enable all
1486 * interrupts. */
1487 core_sleep(IF_COP(core));
1488 continue;
1491 set_irq_level(0);
1492 return cores[IF_COP_CORE(core)].running;
1496 #ifdef RB_PROFILE
1497 void profile_thread(void)
1499 profstart(cores[CURRENT_CORE].running - threads);
1501 #endif
1503 /*---------------------------------------------------------------------------
1504 * Prepares a thread to block on an object's list and/or for a specified
1505 * duration - expects object and slot to be appropriately locked if needed.
1506 *---------------------------------------------------------------------------
1508 static inline void _block_thread_on_l(struct thread_queue *list,
1509 struct thread_entry *thread,
1510 unsigned state
1511 IF_SWCL(, const bool nolock))
1513 /* If inlined, unreachable branches will be pruned with no size penalty
1514 because constant params are used for state and nolock. */
1515 const unsigned int core = IF_COP_CORE(thread->core);
1517 /* Remove the thread from the list of running threads. */
1518 remove_from_list_l(&cores[core].running, thread);
1520 /* Add a timeout to the block if not infinite */
1521 switch (state)
1523 case STATE_BLOCKED:
1524 /* Put the thread into a new list of inactive threads. */
1525 #if CONFIG_CORELOCK == SW_CORELOCK
1526 if (nolock)
1528 thread->bqp = NULL; /* Indicate nolock list */
1529 thread->bqnlp = (struct thread_entry **)list;
1530 add_to_list_l((struct thread_entry **)list, thread);
1532 else
1533 #endif
1535 thread->bqp = list;
1536 add_to_list_l_locked(list, thread);
1538 break;
1539 case STATE_BLOCKED_W_TMO:
1540 /* Put the thread into a new list of inactive threads. */
1541 #if CONFIG_CORELOCK == SW_CORELOCK
1542 if (nolock)
1544 thread->bqp = NULL; /* Indicate nolock list */
1545 thread->bqnlp = (struct thread_entry **)list;
1546 add_to_list_l((struct thread_entry **)list, thread);
1548 else
1549 #endif
1551 thread->bqp = list;
1552 add_to_list_l_locked(list, thread);
1554 /* Fall-through */
1555 case STATE_SLEEPING:
1556 /* If this thread times out sooner than any other thread, update
1557 next_tmo_check to its timeout */
1558 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1560 cores[core].next_tmo_check = thread->tmo_tick;
1563 if (thread->tmo.prev == NULL)
1565 add_to_list_tmo(thread);
1567 /* else thread was never removed from list - just keep it there */
1568 break;
1571 #ifdef HAVE_PRIORITY_SCHEDULING
1572 /* Reset priorities */
1573 if (thread->priority == cores[core].highest_priority)
1574 cores[core].highest_priority = LOWEST_PRIORITY;
1575 #endif
1577 #if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
1578 /* Safe to set state now */
1579 thread->state = state;
1580 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1581 cores[core].blk_ops.state = state;
1582 #endif
1584 #if NUM_CORES > 1
1585 /* Delay slot unlock until task switch */
1586 cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
1587 #endif
1590 static inline void block_thread_on_l(
1591 struct thread_queue *list, struct thread_entry *thread, unsigned state)
1593 _block_thread_on_l(list, thread, state IF_SWCL(, false));
1596 static inline void block_thread_on_l_no_listlock(
1597 struct thread_entry **list, struct thread_entry *thread, unsigned state)
1599 _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
1602 /*---------------------------------------------------------------------------
1603 * Switch thread in round robin fashion for any given priority. Any thread
1604 * that removed itself from the running list first must specify itself in
1605 * the parameter.
1607 * INTERNAL: Intended for use by kernel and not for programs.
1608 *---------------------------------------------------------------------------
1610 void switch_thread(struct thread_entry *old)
1612 const unsigned int core = CURRENT_CORE;
1613 struct thread_entry *thread = cores[core].running;
1614 struct thread_entry *block = old;
1616 if (block == NULL)
1617 old = thread;
1619 #ifdef RB_PROFILE
1620 profile_thread_stopped(old - threads);
1621 #endif
1623 /* Begin task switching by saving our current context so that we can
1624 * restore the state of the current thread later to the point prior
1625 * to this call. */
1626 store_context(&old->context);
1628 /* Check if the current thread stack is overflown */
1629 if(((unsigned int *)old->stack)[0] != DEADBEEF)
1630 thread_stkov(old);
1632 #if NUM_CORES > 1
1633 /* Run any blocking operations requested before switching/sleeping */
1634 run_blocking_ops(core, old);
1635 #endif
1637 /* Go through the list of sleeping tasks to check if we need to wake up
1638 * any of them due to timeout. Also puts core into sleep state until
1639 * there is at least one running process again. */
1640 thread = sleep_core(IF_COP(core));
1642 #ifdef HAVE_PRIORITY_SCHEDULING
1643 /* Select the new task based on priorities and the last time a process
1644 * got CPU time. */
1645 if (block == NULL)
1646 thread = thread->l.next;
1648 for (;;)
1650 int priority = thread->priority;
1652 if (priority < cores[core].highest_priority)
1653 cores[core].highest_priority = priority;
1655 if (priority == cores[core].highest_priority ||
1656 thread->priority_x < cores[core].highest_priority ||
1657 (current_tick - thread->last_run > priority * 8))
1659 cores[core].running = thread;
1660 break;
1663 thread = thread->l.next;
1666 /* Reset the value of thread's last running time to the current time. */
1667 thread->last_run = current_tick;
1668 #else
1669 if (block == NULL)
1671 thread = thread->l.next;
1672 cores[core].running = thread;
1674 #endif /* HAVE_PRIORITY_SCHEDULING */
1676 /* And finally give control to the next thread. */
1677 load_context(&thread->context);
1679 #ifdef RB_PROFILE
1680 profile_thread_started(thread - threads);
1681 #endif
1684 /*---------------------------------------------------------------------------
1685 * Change the boost state of a thread, boosting or unboosting the CPU
1686 * as required. Requires the thread slot to be locked first.
1687 *---------------------------------------------------------------------------
1689 static inline void boost_thread(struct thread_entry *thread, bool boost)
1691 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1692 if ((thread->boosted != 0) != boost)
1694 thread->boosted = boost;
1695 cpu_boost(boost);
1697 #endif
1698 (void)thread; (void)boost;
1701 /*---------------------------------------------------------------------------
1702 * Sleeps a thread for a specified number of ticks, unboosting the thread
1703 * if it is boosted. If ticks is zero, it does not delay but instead switches
1704 * tasks.
1706 * INTERNAL: Intended for use by kernel and not for programs.
1707 *---------------------------------------------------------------------------
1709 void sleep_thread(int ticks)
1711 /* Get the entry for the current running thread. */
1712 struct thread_entry *current = cores[CURRENT_CORE].running;
1714 #if NUM_CORES > 1
1715 /* Lock thread slot */
1716 GET_THREAD_STATE(current);
1717 #endif
1719 /* Set our timeout, change lists, and finally switch threads.
1720 * Unlock during switch on multicore. */
1721 current->tmo_tick = current_tick + ticks + 1;
1722 block_thread_on_l(NULL, current, STATE_SLEEPING);
1723 switch_thread(current);
1725 /* Our status should be STATE_RUNNING */
1726 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1727 "S:R->!*R", current);
1730 /*---------------------------------------------------------------------------
1731 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1732 * Caller with interrupt-accessible lists should disable interrupts first
1733 * and request a BOP_IRQ_LEVEL blocking operation to reset it.
1735 * INTERNAL: Intended for use by kernel objects and not for programs.
1736 *---------------------------------------------------------------------------
1738 IF_SWCL(static inline) void _block_thread(struct thread_queue *list
1739 IF_SWCL(, const bool nolock))
1741 /* Get the entry for the current running thread. */
1742 struct thread_entry *current = cores[CURRENT_CORE].running;
1744 /* Set the state to blocked and ask the scheduler to switch tasks,
1745 * this takes us off of the run queue until we are explicitly woken */
1747 #if NUM_CORES > 1
1748 /* Lock thread slot */
1749 GET_THREAD_STATE(current);
1750 #endif
1752 #if CONFIG_CORELOCK == SW_CORELOCK
1753 /* One branch optimized away during inlining */
1754 if (nolock)
1756 block_thread_on_l_no_listlock((struct thread_entry **)list,
1757 current, STATE_BLOCKED);
1759 else
1760 #endif
1762 block_thread_on_l(list, current, STATE_BLOCKED);
1765 switch_thread(current);
1767 /* Our status should be STATE_RUNNING */
1768 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1769 "B:R->!*R", current);
1772 #if CONFIG_CORELOCK == SW_CORELOCK
1773 /* Inline lock/nolock version of _block_thread into these functions */
1774 void block_thread(struct thread_queue *tq)
1776 _block_thread(tq, false);
1779 void block_thread_no_listlock(struct thread_entry **list)
1781 _block_thread((struct thread_queue *)list, true);
1783 #endif /* CONFIG_CORELOCK */
1785 /*---------------------------------------------------------------------------
1786 * Block a thread on a blocking queue for a specified time interval or until
1787 * explicitly woken - whichever happens first.
1788 * Caller with interrupt-accessible lists should disable interrupts first
1789 * and request that interrupt level be restored after switching out the
1790 * current thread.
1792 * INTERNAL: Intended for use by kernel objects and not for programs.
1793 *---------------------------------------------------------------------------
1795 void block_thread_w_tmo(struct thread_queue *list, int timeout)
1797 /* Get the entry for the current running thread. */
1798 struct thread_entry *current = cores[CURRENT_CORE].running;
1800 #if NUM_CORES > 1
1801 /* Lock thread slot */
1802 GET_THREAD_STATE(current);
1803 #endif
1805 /* Set the state to blocked with the specified timeout */
1806 current->tmo_tick = current_tick + timeout;
1807 /* Set the list for explicit wakeup */
1808 block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);
1810 /* Now force a task switch and block until we have been woken up
1811 * by another thread or timeout is reached - whichever happens first */
1812 switch_thread(current);
1814 /* Our status should be STATE_RUNNING */
1815 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1816 "T:R->!*R", current);
1819 /*---------------------------------------------------------------------------
1820 * Explicitly wake up a thread on a blocking queue. Has no effect on threads
1821 * that called sleep().
1822 * Caller with interrupt-accessible lists should disable interrupts first.
1823 * This code should be considered a critical section by the caller.
1825 * INTERNAL: Intended for use by kernel objects and not for programs.
1826 *---------------------------------------------------------------------------
1828 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
1829 struct thread_queue *list IF_SWCL(, const bool nolock))
1831 struct thread_entry *t;
1832 struct thread_entry *thread;
1833 unsigned state;
1835 /* Wake up the last thread first. */
1836 #if CONFIG_CORELOCK == SW_CORELOCK
1837 /* One branch optimized away during inlining */
1838 if (nolock)
1840 t = list->queue;
1842 else
1843 #endif
1845 t = LOCK_LIST(list);
1848 /* Check if there is a blocked thread at all. */
1849 if (t == NULL)
1851 #if CONFIG_CORELOCK == SW_CORELOCK
1852 if (!nolock)
1853 #endif
1855 UNLOCK_LIST(list, NULL);
1857 return NULL;
1860 thread = t;
1862 #if NUM_CORES > 1
1863 #if CONFIG_CORELOCK == SW_CORELOCK
1864 if (nolock)
1866 /* Lock thread only, not list */
1867 state = GET_THREAD_STATE(thread);
1869 else
1870 #endif
1872 /* This locks in reverse order from other routines so a retry in the
1873 correct order may be needed */
1874 state = TRY_GET_THREAD_STATE(thread);
1875 if (state == STATE_BUSY)
1877 /* Unlock list and retry slot, then list */
1878 UNLOCK_LIST(list, t);
1879 state = GET_THREAD_STATE(thread);
1880 t = LOCK_LIST(list);
1881 /* Be sure thread still exists here - it couldn't have re-added
1882 itself if it was woken elsewhere because this function is
1883 serialized within the object that owns the list. */
1884 if (thread != t)
1886 /* Thread disappeared :( */
1887 UNLOCK_LIST(list, t);
1888 UNLOCK_THREAD(thread, state);
1889 return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
1893 #else /* NUM_CORES == 1 */
1894 state = GET_THREAD_STATE(thread);
1895 #endif /* NUM_CORES */
1897 /* Determine thread's current state. */
1898 switch (state)
1900 case STATE_BLOCKED:
1901 case STATE_BLOCKED_W_TMO:
1902 /* Remove thread from object's blocked list - select t or list depending
1903 on locking type at compile time */
1904 REMOVE_FROM_LIST_L_SELECT(t, list, thread);
1905 #if CONFIG_CORELOCK == SW_CORELOCK
1906 /* Statement optimized away during inlining if nolock != false */
1907 if (!nolock)
1908 #endif
1910 UNLOCK_LIST(list, t); /* Unlock list - removal complete */
1913 #ifdef HAVE_PRIORITY_SCHEDULING
1914 /* Give the task a kick to avoid a stall after wakeup.
1915 Not really proper treatment - TODO later. */
1916 thread->last_run = current_tick - 8*LOWEST_PRIORITY;
1917 #endif
1918 core_schedule_wakeup(thread);
1919 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
1920 return thread;
1921 default:
1922 /* Nothing to do. State is not blocked. */
1923 #if THREAD_EXTRA_CHECKS
1924 THREAD_PANICF("wakeup_thread->block invalid", thread);
1925 case STATE_RUNNING:
1926 case STATE_KILLED:
1927 #endif
1928 #if CONFIG_CORELOCK == SW_CORELOCK
1929 /* Statement optimized away during inlining if nolock != false */
1930 if (!nolock)
1931 #endif
1933 UNLOCK_LIST(list, t); /* Unlock the object's list */
1935 UNLOCK_THREAD(thread, state); /* Unlock thread slot */
1936 return NULL;
1940 #if CONFIG_CORELOCK == SW_CORELOCK
1941 /* Inline lock/nolock version of _wakeup_thread into these functions */
1942 struct thread_entry * wakeup_thread(struct thread_queue *tq)
1944 return _wakeup_thread(tq, false);
1947 struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
1949 return _wakeup_thread((struct thread_queue *)list, true);
1951 #endif /* CONFIG_CORELOCK */
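/* Sketch of the block/wakeup pairing on one queue, using the SW_CORELOCK
 * wrapper names defined above (assumption: other corelock configurations
 * expose the same entry points). The queue is hypothetical and assumed to
 * be initialized with thread_queue_init() and serialized by its owner. */
#if 0 /* illustrative sketch - not compiled */
static struct thread_queue example_evq;

static void example_wait_for_event(void)
{
    block_thread(&example_evq);          /* sleep until signaled */
}

static void example_signal_event(void)
{
    wakeup_thread(&example_evq);         /* returns NULL if nobody waits */
}
#endif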
1953 /*---------------------------------------------------------------------------
1954 * Find an empty thread slot, returning MAXTHREADS if none is found. The slot
1955 * returned will be locked on multicore.
1955 * will be locked on multicore.
1956 *---------------------------------------------------------------------------
1958 static int find_empty_thread_slot(void)
1960 #if NUM_CORES > 1
1961 /* Any slot could be on an IRQ-accessible list */
1962 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1963 #endif
1964 /* Thread slots are not locked on single core */
1966 int n;
1968 for (n = 0; n < MAXTHREADS; n++)
1970 /* Obtain current slot state - lock it on multicore */
1971 unsigned state = GET_THREAD_STATE(&threads[n]);
1973 if (state == STATE_KILLED
1974 #if NUM_CORES > 1
1975 && threads[n].name != THREAD_DESTRUCT
1976 #endif
1979 /* Slot is empty - leave it locked and caller will unlock */
1980 break;
1983 /* Finished examining slot - no longer busy - unlock on multicore */
1984 UNLOCK_THREAD(&threads[n], state);
1987 #if NUM_CORES > 1
1988 set_irq_level(oldlevel); /* Re-enable interrupts - this slot is
1989 not accessible to them yet */
1990 #endif
1992 return n;
1996 /*---------------------------------------------------------------------------
1997 * Place the current core in idle mode - woken up on interrupt or wake
1998 * request from another core.
1999 *---------------------------------------------------------------------------
2001 void core_idle(void)
2003 #if NUM_CORES > 1
2004 const unsigned int core = CURRENT_CORE;
2005 #endif
2006 set_irq_level(HIGHEST_IRQ_LEVEL);
2007 core_sleep(IF_COP(core));
2010 /*---------------------------------------------------------------------------
2011 * Create a thread
2012 * If using a dual core architecture, specify which core to start the thread
2013 * on, and whether to fall back to the other core if it can't be created.
2014 * Return the thread ID if a context area could be allocated, else NULL.
2015 *---------------------------------------------------------------------------
2017 struct thread_entry*
2018 create_thread(void (*function)(void), void* stack, int stack_size,
2019 unsigned flags, const char *name
2020 IF_PRIO(, int priority)
2021 IF_COP(, unsigned int core))
2023 unsigned int i;
2024 unsigned int stacklen;
2025 unsigned int *stackptr;
2026 int slot;
2027 struct thread_entry *thread;
2028 unsigned state;
2030 slot = find_empty_thread_slot();
2031 if (slot >= MAXTHREADS)
2033 return NULL;
2036 /* Munge the stack to make it easy to spot stack overflows */
2037 stacklen = stack_size / sizeof(int);
2038 stackptr = stack;
2039 for(i = 0;i < stacklen;i++)
2041 stackptr[i] = DEADBEEF;
2044 /* Store interesting information */
2045 thread = &threads[slot];
2046 thread->name = name;
2047 thread->stack = stack;
2048 thread->stack_size = stack_size;
2049 thread->bqp = NULL;
2050 #if CONFIG_CORELOCK == SW_CORELOCK
2051 thread->bqnlp = NULL;
2052 #endif
2053 thread->queue = NULL;
2054 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2055 thread->boosted = 0;
2056 #endif
2057 #ifdef HAVE_PRIORITY_SCHEDULING
2058 thread->priority_x = LOWEST_PRIORITY;
2059 thread->priority = priority;
2060 thread->last_run = current_tick - priority * 8;
2061 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2062 #endif
2064 #if NUM_CORES > 1
2065 thread->core = core;
2067 /* Writeback stack munging or anything else before starting */
2068 if (core != CURRENT_CORE)
2070 flush_icache();
2072 #endif
2074 /* Thread is not on any timeout list but be a bit paranoid */
2075 thread->tmo.prev = NULL;
2077 state = (flags & CREATE_THREAD_FROZEN) ?
2078 STATE_FROZEN : STATE_RUNNING;
2080 /* Align stack to a 32-bit word boundary */
2081 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
2083 /* Load the thread's context structure with needed startup information */
2084 THREAD_STARTUP_INIT(core, thread, function);
2086 if (state == STATE_RUNNING)
2088 #if NUM_CORES > 1
2089 if (core != CURRENT_CORE)
2091 /* Next task switch on other core moves thread to running list */
2092 core_schedule_wakeup(thread);
2094 else
2095 #endif
2097 /* Place on running list immediately */
2098 add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
2102 /* remove lock and set state */
2103 UNLOCK_THREAD_SET_STATE(thread, state);
2105 return thread;
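/* Creation sketch. The stack size, names and priority choice are arbitrary
 * for illustration; the IF_PRIO/IF_COP macros keep the call form valid
 * whether or not priorities and multiple cores are configured. */
#if 0 /* illustrative sketch - not compiled */
static long example_stack[1024/sizeof(long)];

static void example_thread_func(void)
{
    for (;;)
        sleep_thread(10);
}

static void example_spawn(void)
{
    struct thread_entry *t =
        create_thread(example_thread_func, example_stack,
                      sizeof(example_stack), 0, "example"
                      IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));
    if (t == NULL)
    {
        /* no free slot - handle the failure */
    }
}
#endif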
2108 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2109 void trigger_cpu_boost(void)
2111 /* No IRQ disable necessary since the current thread cannot be blocked
2112 on an IRQ-accessible list */
2113 struct thread_entry *current = cores[CURRENT_CORE].running;
2114 unsigned state;
2116 state = GET_THREAD_STATE(current);
2117 boost_thread(current, true);
2118 UNLOCK_THREAD(current, state);
2120 (void)state;
2123 void cancel_cpu_boost(void)
2125 struct thread_entry *current = cores[CURRENT_CORE].running;
2126 unsigned state;
2128 state = GET_THREAD_STATE(current);
2129 boost_thread(current, false);
2130 UNLOCK_THREAD(current, state);
2132 (void)state;
2134 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
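/* Boost sketch: bracket a CPU-heavy stretch so the clock is raised only
 * while it is needed. Only meaningful when HAVE_SCHEDULER_BOOSTCTRL is
 * defined; the work itself is left as a placeholder. */
#if 0 /* illustrative sketch - not compiled */
static void example_heavy_work(void)
{
    trigger_cpu_boost();   /* mark this thread as boosted */
    /* ... CPU-intensive section ... */
    cancel_cpu_boost();    /* drop the boost when done */
}
#endif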
2136 /*---------------------------------------------------------------------------
2137 * Remove a thread from the scheduler.
2138 * Parameter is the ID as returned from create_thread().
2140 * Use with care on threads that are not under careful control as this may
2141 * leave various objects in an undefined state. When trying to kill a thread
2142 * on another processor, be sure you know what it's doing and won't be
2143 * switching around itself.
2144 *---------------------------------------------------------------------------
2146 void remove_thread(struct thread_entry *thread)
2148 #if NUM_CORES > 1
2149 /* core is not constant here because of core switching */
2150 unsigned int core = CURRENT_CORE;
2151 unsigned int old_core = NUM_CORES;
2152 #else
2153 const unsigned int core = CURRENT_CORE;
2154 #endif
2155 unsigned state;
2156 int oldlevel;
2158 if (thread == NULL)
2159 thread = cores[core].running;
2161 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2162 state = GET_THREAD_STATE(thread);
2164 if (state == STATE_KILLED)
2166 goto thread_killed;
2169 #if NUM_CORES > 1
2170 if (thread->core != core)
2172 /* Switch cores and safely extract the thread there */
2173 /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
2174 condition if the thread runs away to another processor. */
2175 unsigned int new_core = thread->core;
2176 const char *old_name = thread->name;
2178 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2179 UNLOCK_THREAD(thread, state);
2180 set_irq_level(oldlevel);
2182 old_core = switch_core(new_core);
2184 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2185 state = GET_THREAD_STATE(thread);
2187 core = new_core;
2189 if (state == STATE_KILLED)
2191 /* Thread suicided before we could kill it */
2192 goto thread_killed;
2195 /* Reopen slot - it's locked again anyway */
2196 thread->name = old_name;
2198 if (thread->core != core)
2200 /* We won't play thread tag - just forget it */
2201 UNLOCK_THREAD(thread, state);
2202 set_irq_level(oldlevel);
2203 goto thread_kill_abort;
2206 /* Perform the extraction and switch ourselves back to the original
2207 processor */
2209 #endif /* NUM_CORES > 1 */
2211 #ifdef HAVE_PRIORITY_SCHEDULING
2212 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2213 #endif
2214 if (thread->tmo.prev != NULL)
2216 /* Clean thread off the timeout list if a timeout check hasn't
2217 * run yet */
2218 remove_from_list_tmo(thread);
2221 boost_thread(thread, false);
2223 if (thread == cores[core].running)
2225 /* Suicide - thread has unconditional rights to do this */
2226 /* Maintain locks until switch-out */
2227 block_thread_on_l(NULL, thread, STATE_KILLED);
2229 #if NUM_CORES > 1
2230 /* Switch to the idle stack if not on the main core (where "main"
2231 * runs) */
2232 if (core != CPU)
2234 switch_to_idle_stack(core);
2237 flush_icache();
2238 #endif
2239 /* Signal this thread */
2240 thread_queue_wake_no_listlock(&thread->queue);
2241 /* Switch tasks and never return */
2242 switch_thread(thread);
2243 /* This should never and must never be reached - if it is, the
2244 * state is corrupted */
2245 THREAD_PANICF("remove_thread->K:*R", thread);
2248 #if NUM_CORES > 1
2249 if (thread->name == THREAD_DESTRUCT)
2251 /* Another core is doing this operation already */
2252 UNLOCK_THREAD(thread, state);
2253 set_irq_level(oldlevel);
2254 return;
2256 #endif
2257 if (cores[core].waking.queue != NULL)
2259 /* Get any threads off the waking list and onto the running
2260 * list first - waking and running cannot be distinguished by
2261 * state */
2262 core_perform_wakeup(IF_COP(core));
2265 switch (state)
2267 case STATE_RUNNING:
2268 /* Remove thread from ready to run tasks */
2269 remove_from_list_l(&cores[core].running, thread);
2270 break;
2271 case STATE_BLOCKED:
2272 case STATE_BLOCKED_W_TMO:
2273 /* Remove thread from the queue it's blocked on - including its
2274 * own if waiting there */
2275 #if CONFIG_CORELOCK == SW_CORELOCK
2276 /* One or the other will be valid */
2277 if (thread->bqp == NULL)
2279 remove_from_list_l(thread->bqnlp, thread);
2281 else
2282 #endif /* CONFIG_CORELOCK */
2284 remove_from_list_l_locked(thread->bqp, thread);
2286 break;
2287 /* Otherwise thread is killed or is frozen and hasn't run yet */
2290 /* If thread was waiting on itself, it will have been removed above.
2291 * The wrong order would result in waking the thread first and deadlocking
2292 * since the slot is already locked. */
2293 thread_queue_wake_no_listlock(&thread->queue);
2295 thread_killed: /* Thread was already killed */
2296 /* Removal complete - safe to unlock state and reenable interrupts */
2297 UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
2298 set_irq_level(oldlevel);
2300 #if NUM_CORES > 1
2301 thread_kill_abort: /* Something stopped us from killing the thread */
2302 if (old_core < NUM_CORES)
2304 /* Did a removal on another processor's thread - switch back to
2305 native core */
2306 switch_core(old_core);
2308 #endif
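/* Removal sketch: kill a worker created earlier and clear the stale handle.
 * The handle variable is hypothetical; remove_thread(NULL) would instead
 * terminate the calling thread via the suicide path above. */
#if 0 /* illustrative sketch - not compiled */
static struct thread_entry *example_worker; /* set by a create_thread() call */

static void example_stop_worker(void)
{
    if (example_worker != NULL)
    {
        remove_thread(example_worker);
        example_worker = NULL;
    }
}
#endif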
2311 /*---------------------------------------------------------------------------
2312 * Block the current thread until another thread terminates. A thread may
2313 * wait on itself to terminate which prevents it from running again and it
2314 * will need to be killed externally.
2315 * Parameter is the ID as returned from create_thread().
2316 *---------------------------------------------------------------------------
2318 void thread_wait(struct thread_entry *thread)
2320 const unsigned int core = CURRENT_CORE;
2321 struct thread_entry *current = cores[core].running;
2322 unsigned thread_state;
2323 #if NUM_CORES > 1
2324 int oldlevel;
2325 unsigned current_state;
2326 #endif
2328 if (thread == NULL)
2329 thread = current;
2331 #if NUM_CORES > 1
2332 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2333 #endif
2335 thread_state = GET_THREAD_STATE(thread);
2337 #if NUM_CORES > 1
2338 /* We can't lock the same slot twice. The waitee will also lock itself
2339 first then the thread slots that will be locked and woken in turn.
2340 The same order must be observed here as well. */
2341 if (thread == current)
2343 current_state = thread_state;
2345 else
2347 current_state = GET_THREAD_STATE(current);
2349 #endif
2351 if (thread_state != STATE_KILLED)
2353 /* Unlock the waitee state at task switch - not done for self-wait
2354 because that would double-unlock the state and potentially
2355 corrupt another's busy assert on the slot */
2356 if (thread != current)
2358 #if CONFIG_CORELOCK == SW_CORELOCK
2359 cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
2360 cores[core].blk_ops.thread = thread;
2361 #elif CONFIG_CORELOCK == CORELOCK_SWAP
2362 cores[core].blk_ops.flags |= TBOP_SET_VARu8;
2363 cores[core].blk_ops.var_u8p = &thread->state;
2364 cores[core].blk_ops.var_u8v = thread_state;
2365 #endif
2367 block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
2368 switch_thread(current);
2369 return;
2372 /* Unlock both slots - obviously the current thread can't have
2373 STATE_KILLED so the above if clause will always catch a thread
2374 waiting on itself */
2375 #if NUM_CORES > 1
2376 UNLOCK_THREAD(current, current_state);
2377 UNLOCK_THREAD(thread, thread_state);
2378 set_irq_level(oldlevel);
2379 #endif
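/* Join sketch: block until a worker thread reaches STATE_KILLED. Assumes
 * the worker has already been told to exit by whatever protocol it uses. */
#if 0 /* illustrative sketch - not compiled */
static void example_join(struct thread_entry *worker)
{
    thread_wait(worker); /* returns once the worker terminates */
}
#endif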
2382 #ifdef HAVE_PRIORITY_SCHEDULING
2383 /*---------------------------------------------------------------------------
2384 * Sets the thread's relative priority for the core it runs on.
2385 *---------------------------------------------------------------------------
2387 int thread_set_priority(struct thread_entry *thread, int priority)
2389 unsigned old_priority = (unsigned)-1;
2391 if (thread == NULL)
2392 thread = cores[CURRENT_CORE].running;
2394 #if NUM_CORES > 1
2395 /* Thread could be on any list and therefore on an interrupt accessible
2396 one - disable interrupts */
2397 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2398 #endif
2399 unsigned state = GET_THREAD_STATE(thread);
2401 /* Make sure it's not killed */
2402 if (state != STATE_KILLED)
2404 old_priority = thread->priority;
2405 thread->priority = priority;
2406 cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
2409 #if NUM_CORES > 1
2410 UNLOCK_THREAD(thread, state);
2411 set_irq_level(oldlevel);
2412 #endif
2413 return old_priority;
2416 /*---------------------------------------------------------------------------
2417 * Returns the current priority for a thread.
2418 *---------------------------------------------------------------------------
2420 int thread_get_priority(struct thread_entry *thread)
2422 /* Simple, quick probe. */
2423 if (thread == NULL)
2424 thread = cores[CURRENT_CORE].running;
2426 return (unsigned)thread->priority;
2429 /*---------------------------------------------------------------------------
2430 * Yield that guarantees thread execution once per round regardless of
2431 * thread's scheduler priority - basically a transient realtime boost
2432 * without altering the scheduler's thread precedence.
2434 * HACK ALERT! Search for "priority inheritance" for proper treatment.
2435 *---------------------------------------------------------------------------
2437 void priority_yield(void)
2439 const unsigned int core = CURRENT_CORE;
2440 struct thread_entry *thread = cores[core].running;
2441 thread->priority_x = HIGHEST_PRIORITY;
2442 switch_thread(NULL);
2443 thread->priority_x = LOWEST_PRIORITY;
2445 #endif /* HAVE_PRIORITY_SCHEDULING */
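/* Priority sketch: temporarily raise the current thread (NULL selects the
 * running thread) and use priority_yield() while a lower-priority thread
 * holds something we need. The waiting condition is a placeholder. */
#if 0 /* illustrative sketch - not compiled */
static void example_latency_sensitive(void)
{
    int old = thread_set_priority(NULL, HIGHEST_PRIORITY);

    while (0 /* e.g. resource still held by a lower-priority thread */)
        priority_yield();   /* every thread gets one turn per round */

    thread_set_priority(NULL, old); /* restore the previous priority */
}
#endif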
2447 /* Resumes a frozen thread - similar logic to wakeup_thread except that
2448 the thread is on no scheduler list at all. It exists simply by virtue of
2449 the slot having a state of STATE_FROZEN. */
2450 void thread_thaw(struct thread_entry *thread)
2452 #if NUM_CORES > 1
2453 /* Thread could be on any list and therefore on an interrupt accessible
2454 one - disable interrupts */
2455 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2456 #endif
2457 unsigned state = GET_THREAD_STATE(thread);
2459 if (state == STATE_FROZEN)
2461 const unsigned int core = CURRENT_CORE;
2462 #if NUM_CORES > 1
2463 if (thread->core != core)
2465 core_schedule_wakeup(thread);
2467 else
2468 #endif
2470 add_to_list_l(&cores[core].running, thread);
2473 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
2474 #if NUM_CORES > 1
2475 set_irq_level(oldlevel);
2476 #endif
2477 return;
2480 #if NUM_CORES > 1
2481 UNLOCK_THREAD(thread, state);
2482 set_irq_level(oldlevel);
2483 #endif
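/* Thaw sketch: pair CREATE_THREAD_FROZEN with thread_thaw() so the new
 * thread cannot run before its data is ready. Reuses the hypothetical
 * function and priority from the create_thread sketch above. */
#if 0 /* illustrative sketch - not compiled */
static long example_frozen_stack[1024/sizeof(long)];

static void example_spawn_frozen(void)
{
    struct thread_entry *t =
        create_thread(example_thread_func, example_frozen_stack,
                      sizeof(example_frozen_stack), CREATE_THREAD_FROZEN,
                      "frozen" IF_PRIO(, PRIORITY_USER_INTERFACE)
                      IF_COP(, CPU));

    /* ... finish initializing whatever the thread consumes ... */

    if (t != NULL)
        thread_thaw(t); /* now place it on the run list */
}
#endif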
2486 /*---------------------------------------------------------------------------
2487 * Return the ID of the currently executing thread.
2488 *---------------------------------------------------------------------------
2490 struct thread_entry * thread_get_current(void)
2492 return cores[CURRENT_CORE].running;
2495 #if NUM_CORES > 1
2496 /*---------------------------------------------------------------------------
2497 * Switch the processor that the currently executing thread runs on.
2498 *---------------------------------------------------------------------------
2500 unsigned int switch_core(unsigned int new_core)
2502 const unsigned int core = CURRENT_CORE;
2503 struct thread_entry *current = cores[core].running;
2504 struct thread_entry *w;
2505 int oldlevel;
2507 /* Interrupts can access the lists that will be used - disable them */
2508 unsigned state = GET_THREAD_STATE(current);
2510 if (core == new_core)
2512 /* No change - just unlock everything and return same core */
2513 UNLOCK_THREAD(current, state);
2514 return core;
2517 /* Get us off the running list for the current core */
2518 remove_from_list_l(&cores[core].running, current);
2520 /* Stash return value (old core) in a safe place */
2521 current->retval = core;
2523 /* If a timeout hadn't yet been cleaned-up it must be removed now or
2524 * the other core will likely attempt a removal from the wrong list! */
2525 if (current->tmo.prev != NULL)
2527 remove_from_list_tmo(current);
2530 /* Change the core number for this thread slot */
2531 current->core = new_core;
2533 /* Do not use core_schedule_wakeup here since this will result in
2534 * the thread starting to run on the other core before being finished on
2535 * this one. Delay the wakeup list unlock to keep the other core stuck
2536 * until this thread is ready. */
2537 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2538 w = LOCK_LIST(&cores[new_core].waking);
2539 ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);
2541 /* Make a callback into device-specific code, unlock the wakeup list so
2542 * that execution may resume on the new core, unlock our slot and finally
2543 * restore the interrupt level */
2544 cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
2545 TBOP_UNLOCK_LIST;
2546 cores[core].blk_ops.list_p = &cores[new_core].waking;
2547 #if CONFIG_CORELOCK == CORELOCK_SWAP
2548 cores[core].blk_ops.state = STATE_RUNNING;
2549 cores[core].blk_ops.list_v = w;
2550 #endif
2552 #ifdef HAVE_PRIORITY_SCHEDULING
2553 current->priority_x = HIGHEST_PRIORITY;
2554 cores[core].highest_priority = LOWEST_PRIORITY;
2555 #endif
2556 /* Do the stack switching, cache maintenance and switch_thread call -
2557 requires native code */
2558 switch_thread_core(core, current);
2560 #ifdef HAVE_PRIORITY_SCHEDULING
2561 current->priority_x = LOWEST_PRIORITY;
2562 cores[current->core].highest_priority = LOWEST_PRIORITY;
2563 #endif
2565 /* Finally return the old core to caller */
2566 return current->retval;
2567 (void)state;
2569 #endif /* NUM_CORES > 1 */
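/* Migration sketch (NUM_CORES > 1 only): hop to the other core for work
 * that must run there, then hop back. COP is assumed to name the second
 * core; only CPU appears in this file. */
#if 0 /* illustrative sketch - not compiled */
static void example_run_on_cop(void)
{
    unsigned int old_core = switch_core(COP);

    /* ... work that must execute on the coprocessor ... */

    switch_core(old_core); /* return to the original core */
}
#endif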
2571 /*---------------------------------------------------------------------------
2572 * Initialize threading API. This assumes interrupts are not yet enabled. On
2573 * multicore setups, no core is allowed to proceed until create_thread calls
2574 * are safe to perform.
2575 *---------------------------------------------------------------------------
2577 void init_threads(void)
2579 const unsigned int core = CURRENT_CORE;
2580 struct thread_entry *thread;
2581 int slot;
2583 /* CPU will initialize first and then sleep */
2584 slot = find_empty_thread_slot();
2586 if (slot >= MAXTHREADS)
2588 /* WTF? There really must be a slot available at this stage.
2589 * This can fail if, for example, .bss isn't zeroed out by the loader
2590 * or the threads array is in the wrong section. */
2591 THREAD_PANICF("init_threads->no slot", NULL);
2594 /* Initialize initially non-zero members of core */
2595 thread_queue_init(&cores[core].waking);
2596 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2597 #ifdef HAVE_PRIORITY_SCHEDULING
2598 cores[core].highest_priority = LOWEST_PRIORITY;
2599 #endif
2601 /* Initialize initially non-zero members of slot */
2602 thread = &threads[slot];
2603 thread->name = main_thread_name;
2604 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); /* No sync worries yet */
2605 #if NUM_CORES > 1
2606 thread->core = core;
2607 #endif
2608 #ifdef HAVE_PRIORITY_SCHEDULING
2609 thread->priority = PRIORITY_USER_INTERFACE;
2610 thread->priority_x = LOWEST_PRIORITY;
2611 #endif
2612 #if CONFIG_CORELOCK == SW_CORELOCK
2613 corelock_init(&thread->cl);
2614 #endif
2616 add_to_list_l(&cores[core].running, thread);
2618 if (core == CPU)
2620 thread->stack = stackbegin;
2621 thread->stack_size = (int)stackend - (int)stackbegin;
2622 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
2623 /* TODO: HAL interface for this */
2624 /* Wake up coprocessor and let it initialize kernel and threads */
2625 #ifdef CPU_PP502x
2626 MBX_MSG_CLR = 0x3f;
2627 #endif
2628 COP_CTL = PROC_WAKE;
2629 /* Sleep until finished */
2630 CPU_CTL = PROC_SLEEP;
2631 nop; nop; nop; nop;
2633 else
2635 /* Initial stack is the COP idle stack */
2636 thread->stack = cop_idlestackbegin;
2637 thread->stack_size = IDLE_STACK_SIZE;
2638 /* Get COP safely primed inside switch_thread where it will remain
2639 * until a thread actually exists on it */
2640 CPU_CTL = PROC_WAKE;
2641 remove_thread(NULL);
2642 #endif /* NUM_CORES */
2646 /*---------------------------------------------------------------------------
2647 * Returns the maximum percentage of stack a thread ever used while running.
2648 * NOTE: Some large buffer allocations that don't use enough of the buffer to
2649 * overwrite stackptr[0] will not be seen.
2650 *---------------------------------------------------------------------------
2652 int thread_stack_usage(const struct thread_entry *thread)
2654 unsigned int *stackptr = thread->stack;
2655 int stack_words = thread->stack_size / sizeof (int);
2656 int i, usage = 0;
2658 for (i = 0; i < stack_words; i++)
2660 if (stackptr[i] != DEADBEEF)
2662 usage = ((stack_words - i) * 100) / stack_words;
2663 break;
2667 return usage;
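/* Reporting sketch: fetch the current thread's peak stack usage together
 * with its printable name. The buffer size is arbitrary; the actual
 * logging/display call is left out. */
#if 0 /* illustrative sketch - not compiled */
static void example_report_stack(void)
{
    struct thread_entry *t = thread_get_current();
    char name[32];
    int usage = thread_stack_usage(t); /* percent of stack ever written */

    thread_get_name(name, sizeof(name), t);
    /* ... log or display name and usage ... */
    (void)name; (void)usage;
}
#endif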
2670 #if NUM_CORES > 1
2671 /*---------------------------------------------------------------------------
2672 * Returns the maximum percentage of the core's idle stack ever used during
2673 * runtime.
2674 *---------------------------------------------------------------------------
2676 int idle_stack_usage(unsigned int core)
2678 unsigned int *stackptr = idle_stacks[core];
2679 int i, usage = 0;
2681 for (i = 0; i < IDLE_STACK_WORDS; i++)
2683 if (stackptr[i] != DEADBEEF)
2685 usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
2686 break;
2690 return usage;
2692 #endif
2694 /*---------------------------------------------------------------------------
2695 * Fills in the buffer with the specified thread's name. If the name is NULL,
2696 * empty, or the thread is in destruct state, a formatted ID is written
2697 * instead.
2698 *---------------------------------------------------------------------------
2700 void thread_get_name(char *buffer, int size,
2701 struct thread_entry *thread)
2703 if (size <= 0)
2704 return;
2706 *buffer = '\0';
2708 if (thread)
2710 /* Display thread name if one or ID if none */
2711 const char *name = thread->name;
2712 const char *fmt = "%s";
2713 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
2715 name = (const char *)thread;
2716 fmt = "%08lX";
2718 snprintf(buffer, size, fmt, name);