[Rockbox.git] / firmware / thread.c
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Ulf Ralberg
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
19 #include "config.h"
20 #include <stdbool.h>
21 #include "thread.h"
22 #include "panic.h"
23 #include "sprintf.h"
24 #include "system.h"
25 #include "kernel.h"
26 #include "cpu.h"
27 #include "string.h"
28 #ifdef RB_PROFILE
29 #include <profile.h>
30 #endif
32 /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
33 #ifdef DEBUG
34 #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
35 #else
36 #define THREAD_EXTRA_CHECKS 0
37 #endif
39 /**
40  * General locking order to guarantee progress. The order must be observed,
41  * but not all stages are necessarily required. Going from 1) to 3) is
42  * perfectly legal.
44 * 1) IRQ
45  * This is first because of the likelihood of an interrupt occurring that
46  * also accesses one of the objects farther down the list. Any non-blocking
47  * synchronization done may already hold a lock on something during normal
48  * execution, and if an interrupt handler running on the same processor as
49  * the one that has the resource locked were to attempt to access the
50  * resource, the interrupt handler would wait forever for an unlock
51  * that will never happen. There is no danger if the interrupt occurs on
52  * a different processor because the one that has the lock will eventually
53  * unlock and the other processor's handler may proceed at that time. Not
54  * necessary when the resource in question is definitely not available to
55  * interrupt handlers.
57 * 2) Kernel Object
58 * 1) May be needed beforehand if the kernel object allows dual-use such as
59 * event queues. The kernel object must have a scheme to protect itself from
60 * access by another processor and is responsible for serializing the calls
61 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
62  * other. If a thread blocks on an object it must fill in the blk_ops members
63  * for its core to unlock _after_ the thread's context has been saved, and the
64  * unlocking will be done in reverse order from this hierarchy.
66 * 3) Thread Slot
67 * This locks access to the thread's slot such that its state cannot be
68 * altered by another processor when a state change is in progress such as
69 * when it is in the process of going on a blocked list. An attempt to wake
70 * a thread while it is still blocking will likely desync its state with
71 * the other resources used for that state.
73 * 4) Lists
74 * Usually referring to a list (aka. queue) that a thread will be blocking
75 * on that belongs to some object and is shareable amongst multiple
76 * processors. Parts of the scheduler may have access to them without actually
77 * locking the kernel object such as when a thread is blocked with a timeout
78 * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
79  * its lists locked when the thread blocks so that all object list access is
80 * synchronized. Failure to do so would corrupt the list links.
82 * 5) Core Lists
83 * These lists are specific to a particular processor core and are accessible
84 * by all processor cores and interrupt handlers. They are used when an
85 * operation may only be performed by the thread's own core in a normal
86 * execution context. The wakeup list is the prime example where a thread
87 * may be added by any means and the thread's own core will remove it from
88 * the wakeup list and put it on the running list (which is only ever
89 * accessible by its own processor).
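/* Illustrative sketch only (not part of the build): roughly how the
 * 1) - 5) ordering above plays out when a kernel object blocks the current
 * thread on multicore. 'q_cl' and 'q_waiters' are placeholders for whatever
 * corelock and wait list the calling kernel object really uses; the slot
 * locking and deferred unlocks correspond to _block_thread further down.
 *
 *   oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);            // 1) IRQ
 *   corelock_lock(&q_cl);                                   // 2) kernel object
 *   GET_THREAD_STATE(current);                              // 3) thread slot
 *   block_thread_on_l(&q_waiters, current, STATE_BLOCKED);  // 4) object list
 *   // unlocks are queued in blk_ops and performed in reverse order by
 *   // run_blocking_ops() once the thread's context has been saved
 *   switch_thread(current);                                 // 5) core lists
 *   set_irq_level(oldlevel);
 */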
91 #define DEADBEEF ((unsigned int)0xdeadbeef)
92 /* Cast to the machine int type, whose size could be < 4. */
93 struct core_entry cores[NUM_CORES] IBSS_ATTR;
94 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
96 static const char main_thread_name[] = "main";
97 extern int stackbegin[];
98 extern int stackend[];
100 /* core_sleep procedure to implement for any CPU to ensure an asynchronous wakeup
101 * never results in requiring a wait until the next tick (up to 10000uS!). May
102 * require assembly and careful instruction ordering.
104 * 1) On multicore, stay awake if directed to do so by another. If so, goto step 4.
105 * 2) If processor requires, atomically reenable interrupts and perform step 3.
106 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
107 * goto step 5.
108 * 4) Enable interrupts.
109 * 5) Exit procedure.
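/* The same procedure as C-like pseudocode (sketch only; the helper names
 * here are placeholders, and the real per-CPU versions below are largely
 * assembly because the instruction ordering is timing-critical):
 *
 *   signal_intent_to_sleep(core);               // handshake for step 1
 *   if (!other_core_requested_wakeup(core))     // 1) stay awake if asked to
 *       sleep_cpu_core(core);                   // 2-3) sleep the core
 *   clear_sleep_handshake(core);
 *   wait_for_waker_to_finish(core);
 *   enable_irq_fiq();                           // 4) enable interrupts
 *                                               // 5) exit
 */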
111 static inline void core_sleep(IF_COP_VOID(unsigned int core))
112 __attribute__((always_inline));
114 static void check_tmo_threads(void)
115 __attribute__((noinline));
117 static inline void block_thread_on_l(
118 struct thread_queue *list, struct thread_entry *thread, unsigned state)
119 __attribute__((always_inline));
121 static inline void block_thread_on_l_no_listlock(
122 struct thread_entry **list, struct thread_entry *thread, unsigned state)
123 __attribute__((always_inline));
125 static inline void _block_thread_on_l(
126 struct thread_queue *list, struct thread_entry *thread,
127 unsigned state IF_SWCL(, const bool single))
128 __attribute__((always_inline));
130 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
131 struct thread_queue *list IF_SWCL(, const bool nolock))
132 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
134 IF_SWCL(static inline) void _block_thread(
135 struct thread_queue *list IF_SWCL(, const bool nolock))
136 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
138 static void add_to_list_tmo(struct thread_entry *thread)
139 __attribute__((noinline));
141 static void core_schedule_wakeup(struct thread_entry *thread)
142 __attribute__((noinline));
144 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
145 __attribute__((always_inline));
147 #if NUM_CORES > 1
148 static inline void run_blocking_ops(
149 unsigned int core, struct thread_entry *thread)
150 __attribute__((always_inline));
151 #endif
153 static void thread_stkov(struct thread_entry *thread)
154 __attribute__((noinline));
156 static inline void store_context(void* addr)
157 __attribute__((always_inline));
159 static inline void load_context(const void* addr)
160 __attribute__((always_inline));
162 void switch_thread(struct thread_entry *old)
163 __attribute__((noinline));
166 /****************************************************************************
167 * Processor-specific section
170 #if defined(CPU_ARM)
171 /*---------------------------------------------------------------------------
172 * Start the thread running and terminate it if it returns
173 *---------------------------------------------------------------------------
175 static void start_thread(void) __attribute__((naked,used));
176 static void start_thread(void)
178 /* r0 = context */
179 asm volatile (
180 "ldr sp, [r0, #32] \n" /* Load initial sp */
181 "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
182 "mov r1, #0 \n" /* Mark thread as running */
183 "str r1, [r0, #40] \n"
184 #if NUM_CORES > 1
185 "ldr r0, =invalidate_icache \n" /* Invalidate this core's cache. */
186 "mov lr, pc \n" /* This could be the first entry into */
187 "bx r0 \n" /* plugin or codec code for this core. */
188 #endif
189 "mov lr, pc \n" /* Call thread function */
190 "bx r4 \n"
191 "mov r0, #0 \n" /* remove_thread(NULL) */
192 "ldr pc, =remove_thread \n"
193 ".ltorg \n" /* Dump constant pool */
194 ); /* No clobber list - new thread doesn't care */
197 /* For startup, place context pointer in r4 slot, start_thread pointer in r5
198 * slot, and thread function pointer in context.start. See load_context for
199 * what happens when thread is initially going to run. */
200 #define THREAD_STARTUP_INIT(core, thread, function) \
201 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
202 (thread)->context.r[1] = (unsigned int)start_thread, \
203 (thread)->context.start = (void *)function; })
205 /*---------------------------------------------------------------------------
206 * Store non-volatile context.
207 *---------------------------------------------------------------------------
209 static inline void store_context(void* addr)
211 asm volatile(
212 "stmia %0, { r4-r11, sp, lr } \n"
213 : : "r" (addr)
217 /*---------------------------------------------------------------------------
218 * Load non-volatile context.
219 *---------------------------------------------------------------------------
221 static inline void load_context(const void* addr)
223 asm volatile(
224 "ldr r0, [%0, #40] \n" /* Load start pointer */
225 "cmp r0, #0 \n" /* Check for NULL */
226 "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
227 "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
228 : : "r" (addr) : "r0" /* only! */
232 #if defined (CPU_PP)
234 #if NUM_CORES > 1
235 extern int cpu_idlestackbegin[];
236 extern int cpu_idlestackend[];
237 extern int cop_idlestackbegin[];
238 extern int cop_idlestackend[];
239 static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
241 [CPU] = cpu_idlestackbegin,
242 [COP] = cop_idlestackbegin
245 #if CONFIG_CPU == PP5002
246 /* Bytes to emulate the PP502x mailbox bits */
247 struct core_semaphores
249 volatile uint8_t intend_wake; /* 00h */
250 volatile uint8_t stay_awake; /* 01h */
251 volatile uint8_t intend_sleep; /* 02h */
252 volatile uint8_t unused; /* 03h */
255 static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR;
256 #endif
258 #endif /* NUM_CORES */
260 #if CONFIG_CORELOCK == SW_CORELOCK
261 /* Software core locks using Peterson's mutual exclusion algorithm */
263 /*---------------------------------------------------------------------------
264 * Initialize the corelock structure.
265 *---------------------------------------------------------------------------
267 void corelock_init(struct corelock *cl)
269 memset(cl, 0, sizeof (*cl));
272 #if 1 /* Assembly locks to minimize overhead */
273 /*---------------------------------------------------------------------------
274 * Wait for the corelock to become free and acquire it when it does.
275 *---------------------------------------------------------------------------
277 void corelock_lock(struct corelock *cl) __attribute__((naked));
278 void corelock_lock(struct corelock *cl)
280 asm volatile (
281 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
282 "ldrb r1, [r1] \n"
283 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
284 "and r2, r1, #1 \n" /* r2 = othercore */
285 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
286 "1: \n"
287 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 0 ? */
288 "cmp r3, #0 \n"
289 "ldrneb r3, [r0, #2] \n" /* || cl->turn == core ? */
290 "cmpne r3, r1, lsr #7 \n"
291 "bxeq lr \n" /* yes? lock acquired */
292 "b 1b \n" /* keep trying */
293 : : "i"(&PROCESSOR_ID)
295 (void)cl;
298 /*---------------------------------------------------------------------------
299  * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
300 *---------------------------------------------------------------------------
302 int corelock_try_lock(struct corelock *cl) __attribute__((naked));
303 int corelock_try_lock(struct corelock *cl)
305 asm volatile (
306 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
307 "ldrb r1, [r1] \n"
308 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
309 "and r2, r1, #1 \n" /* r2 = othercore */
310 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
311 "1: \n"
312 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 0 ? */
313 "cmp r3, #0 \n"
314 "ldrneb r3, [r0, #2] \n" /* || cl->turn == core? */
315 "cmpne r3, r1, lsr #7 \n"
316 "moveq r0, #1 \n" /* yes? lock acquired */
317 "bxeq lr \n"
318 "mov r2, #0 \n" /* cl->myl[core] = 0 */
319 "strb r2, [r0, r1, lsr #7] \n"
320 "mov r0, r2 \n"
321 "bx lr \n" /* acquisition failed */
322 : : "i"(&PROCESSOR_ID)
325 return 0;
326 (void)cl;
329 /*---------------------------------------------------------------------------
330 * Release ownership of the corelock
331 *---------------------------------------------------------------------------
333 void corelock_unlock(struct corelock *cl) __attribute__((naked));
334 void corelock_unlock(struct corelock *cl)
336 asm volatile (
337 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
338 "ldrb r1, [r1] \n"
339 "mov r2, #0 \n" /* cl->myl[core] = 0 */
340 "strb r2, [r0, r1, lsr #7] \n"
341 "bx lr \n"
342 : : "i"(&PROCESSOR_ID)
344 (void)cl;
346 #else /* C versions for reference */
347 /*---------------------------------------------------------------------------
348  * Wait for the corelock to become free and acquire it when it does.
349 *---------------------------------------------------------------------------
351 void corelock_lock(struct corelock *cl)
353 const unsigned int core = CURRENT_CORE;
354 const unsigned int othercore = 1 - core;
356 cl->myl[core] = core;
357 cl->turn = othercore;
359 for (;;)
361 if (cl->myl[othercore] == 0 || cl->turn == core)
362 break;
366 /*---------------------------------------------------------------------------
367  * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
368 *---------------------------------------------------------------------------
370 int corelock_try_lock(struct corelock *cl)
372 const unsigned int core = CURRENT_CORE;
373 const unsigned int othercore = 1 - core;
375 cl->myl[core] = core;
376 cl->turn = othercore;
378 if (cl->myl[othercore] == 0 || cl->turn == core)
380 return 1;
383 cl->myl[core] = 0;
384 return 0;
387 /*---------------------------------------------------------------------------
388 * Release ownership of the corelock
389 *---------------------------------------------------------------------------
391 void corelock_unlock(struct corelock *cl)
393 cl->myl[CURRENT_CORE] = 0;
395 #endif /* ASM / C selection */
397 #endif /* CONFIG_CORELOCK == SW_CORELOCK */
399 /*---------------------------------------------------------------------------
400 * Put core in a power-saving state if waking list wasn't repopulated and if
401 * no other core requested a wakeup for it to perform a task.
402 *---------------------------------------------------------------------------
404 #if NUM_CORES == 1
405 /* Shared single-core build debugging version */
406 static inline void core_sleep(void)
408 PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
409 nop; nop; nop;
410 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
412 #elif defined (CPU_PP502x)
413 static inline void core_sleep(unsigned int core)
415 #if 1
416 asm volatile (
417 "mov r0, #4 \n" /* r0 = 0x4 << core */
418 "mov r0, r0, lsl %[c] \n"
419 "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
420 "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
421 "tst r1, r0, lsl #2 \n"
422 "moveq r1, #0x80000000 \n" /* Then sleep */
423 "streq r1, [%[ctl], %[c], lsl #2] \n"
424 "moveq r1, #0 \n" /* Clear control reg */
425 "streq r1, [%[ctl], %[c], lsl #2] \n"
426 "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
427 "str r1, [%[mbx], #8] \n"
428 "1: \n" /* Wait for wake procedure to finish */
429 "ldr r1, [%[mbx], #0] \n"
430 "tst r1, r0, lsr #2 \n"
431 "bne 1b \n"
432 "mrs r1, cpsr \n" /* Enable interrupts */
433 "bic r1, r1, #0xc0 \n"
434 "msr cpsr_c, r1 \n"
436 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core)
437 : "r0", "r1");
438 #else /* C version for reference */
439 /* Signal intent to sleep */
440 MBX_MSG_SET = 0x4 << core;
442 /* Something waking or other processor intends to wake us? */
443 if ((MBX_MSG_STAT & (0x10 << core)) == 0)
445 PROC_CTL(core) = PROC_SLEEP; nop; /* Snooze */
446 PROC_CTL(core) = 0; /* Clear control reg */
449 /* Signal wake - clear wake flag */
450 MBX_MSG_CLR = 0x14 << core;
452 /* Wait for other processor to finish wake procedure */
453 while (MBX_MSG_STAT & (0x1 << core));
455 /* Enable IRQ, FIQ */
456 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
457 #endif /* ASM/C selection */
459 #elif CONFIG_CPU == PP5002
460 /* PP5002 has no mailboxes - emulate using bytes */
461 static inline void core_sleep(unsigned int core)
463 #if 1
464 asm volatile (
465 "mov r0, #1 \n" /* Signal intent to sleep */
466 "strb r0, [%[sem], #2] \n"
467 "ldrb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
468 "cmp r0, #0 \n"
469 "bne 2f \n"
470 /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
471 * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
472 * that the correct alternative is executed. Don't change the order
473 * of the next 4 instructions! */
474 "tst pc, #0x0c \n"
475 "mov r0, #0xca \n"
476 "strne r0, [%[ctl], %[c], lsl #2] \n"
477 "streq r0, [%[ctl], %[c], lsl #2] \n"
478 "nop \n" /* nop's needed because of pipeline */
479 "nop \n"
480 "nop \n"
481 "2: \n"
482 "mov r0, #0 \n" /* Clear stay_awake and sleep intent */
483 "strb r0, [%[sem], #1] \n"
484 "strb r0, [%[sem], #2] \n"
485 "1: \n" /* Wait for wake procedure to finish */
486 "ldrb r0, [%[sem], #0] \n"
487 "cmp r0, #0 \n"
488 "bne 1b \n"
489 "mrs r0, cpsr \n" /* Enable interrupts */
490 "bic r0, r0, #0xc0 \n"
491 "msr cpsr_c, r0 \n"
493 : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
494 [ctl]"r"(&PROC_CTL(CPU))
495 : "r0"
497 #else /* C version for reference */
498 /* Signal intent to sleep */
499 core_semaphores[core].intend_sleep = 1;
501 /* Something waking or other processor intends to wake us? */
502 if (core_semaphores[core].stay_awake == 0)
504 PROC_CTL(core) = PROC_SLEEP; /* Snooze */
505 nop; nop; nop;
508 /* Signal wake - clear wake flag */
509 core_semaphores[core].stay_awake = 0;
510 core_semaphores[core].intend_sleep = 0;
512 /* Wait for other processor to finish wake procedure */
513 while (core_semaphores[core].intend_wake != 0);
515 /* Enable IRQ, FIQ */
516 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
517 #endif /* ASM/C selection */
519 #endif /* CPU type */
521 /*---------------------------------------------------------------------------
522 * Wake another processor core that is sleeping or prevent it from doing so
523 * if it was already destined. FIQ, IRQ should be disabled before calling.
524 *---------------------------------------------------------------------------
526 #if NUM_CORES == 1
527 /* Shared single-core build debugging version */
528 void core_wake(void)
530 /* No wakey - core already wakey */
532 #elif defined (CPU_PP502x)
533 void core_wake(unsigned int othercore)
535 #if 1
536 /* avoid r0 since that contains othercore */
537 asm volatile (
538 "mrs r3, cpsr \n" /* Disable IRQ */
539 "orr r1, r3, #0x80 \n"
540 "msr cpsr_c, r1 \n"
541 "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
542 "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
543 "str r2, [%[mbx], #4] \n"
544 "1: \n" /* If it intends to sleep, let it first */
545 "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
546 "eor r1, r1, #0xc \n"
547 "tst r1, r2, lsr #2 \n"
548 "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
549 "tsteq r1, #0x80000000 \n"
550 "beq 1b \n" /* Wait for sleep or wake */
551 "tst r1, #0x80000000 \n" /* If sleeping, wake it */
552 "movne r1, #0x0 \n"
553 "strne r1, [%[ctl], %[oc], lsl #2] \n"
554 "mov r1, r2, lsr #4 \n"
555 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
556 "msr cpsr_c, r3 \n" /* Restore int status */
558 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
559 [oc]"r"(othercore)
560 : "r1", "r2", "r3");
561 #else /* C version for reference */
562 /* Disable interrupts - avoid reentrancy from the tick */
563 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
565 /* Signal intent to wake other processor - set stay awake */
566 MBX_MSG_SET = 0x11 << othercore;
568 /* If it intends to sleep, wait until it does or aborts */
569 while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
570 (PROC_CTL(othercore) & PROC_SLEEP) == 0);
572 /* If sleeping, wake it up */
573 if (PROC_CTL(othercore) & PROC_SLEEP)
574 PROC_CTL(othercore) = 0;
576 /* Done with wake procedure */
577 MBX_MSG_CLR = 0x1 << othercore;
578 set_irq_level(oldlevel);
579 #endif /* ASM/C selection */
581 #elif CONFIG_CPU == PP5002
582 /* PP5002 has no mailboxes - emulate using bytes */
583 void core_wake(unsigned int othercore)
585 #if 1
586 /* avoid r0 since that contains othercore */
587 asm volatile (
588 "mrs r3, cpsr \n" /* Disable IRQ */
589 "orr r1, r3, #0x80 \n"
590 "msr cpsr_c, r1 \n"
591 "mov r1, #1 \n" /* Signal intent to wake other core */
592 "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
593 "strh r1, [%[sem], #0] \n"
594 "mov r2, #0x8000 \n"
595 "1: \n" /* If it intends to sleep, let it first */
596 "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
597 "cmp r1, #1 \n"
598 "ldr r1, [%[st]] \n" /* && not sleeping ? */
599 "tsteq r1, r2, lsr %[oc] \n"
600 "beq 1b \n" /* Wait for sleep or wake */
601 "tst r1, r2, lsr %[oc] \n"
602 "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
603 "movne r1, #0xce \n"
604 "strne r1, [r2, %[oc], lsl #2] \n"
605 "mov r1, #0 \n" /* Done with wake procedure */
606 "strb r1, [%[sem], #0] \n"
607 "msr cpsr_c, r3 \n" /* Restore int status */
609 : [sem]"r"(&core_semaphores[othercore]),
610 [st]"r"(&PROC_STAT),
611 [oc]"r"(othercore)
612 : "r1", "r2", "r3"
614 #else /* C version for reference */
615 /* Disable interrupts - avoid reentrancy from the tick */
616 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
618 /* Signal intent to wake other processor - set stay awake */
619 core_semaphores[othercore].intend_wake = 1;
620 core_semaphores[othercore].stay_awake = 1;
622 /* If it intends to sleep, wait until it does or aborts */
623 while (core_semaphores[othercore].intend_sleep != 0 &&
624 (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
626 /* If sleeping, wake it up */
627 if (PROC_STAT & PROC_SLEEPING(othercore))
628 PROC_CTL(othercore) = PROC_WAKE;
630 /* Done with wake procedure */
631 core_semaphores[othercore].intend_wake = 0;
632 set_irq_level(oldlevel);
633 #endif /* ASM/C selection */
635 #endif /* CPU type */
637 #if NUM_CORES > 1
638 /*---------------------------------------------------------------------------
639 * Switches to a stack that always resides in the Rockbox core.
641 * Needed when a thread suicides on a core other than the main CPU since the
642 * stack used when idling is the stack of the last thread to run. This stack
643 * may not reside in the core in which case the core will continue to use a
644 * stack from an unloaded module until another thread runs on it.
645 *---------------------------------------------------------------------------
647 static inline void switch_to_idle_stack(const unsigned int core)
649 asm volatile (
650 "str sp, [%0] \n" /* save original stack pointer on idle stack */
651 "mov sp, %0 \n" /* switch stacks */
652 : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
653 (void)core;
656 /*---------------------------------------------------------------------------
657 * Perform core switch steps that need to take place inside switch_thread.
659  * These steps must take place before changing the processor and after
660 * having entered switch_thread since switch_thread may not do a normal return
661 * because the stack being used for anything the compiler saved will not belong
662 * to the thread's destination core and it may have been recycled for other
663 * purposes by the time a normal context load has taken place. switch_thread
664 * will also clobber anything stashed in the thread's context or stored in the
665 * nonvolatile registers if it is saved there before the call since the
666 * compiler's order of operations cannot be known for certain.
668 static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
670 /* Flush our data to ram */
671 flush_icache();
672 /* Stash thread in r4 slot */
673 thread->context.r[0] = (unsigned int)thread;
674 /* Stash restart address in r5 slot */
675 thread->context.r[1] = (unsigned int)thread->context.start;
676 /* Save sp in context.sp while still running on old core */
677 thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
680 /*---------------------------------------------------------------------------
681 * Machine-specific helper function for switching the processor a thread is
682 * running on. Basically, the thread suicides on the departing core and is
683 * reborn on the destination. Were it not for gcc's ill-behavior regarding
684 * naked functions written in C where it actually clobbers non-volatile
685 * registers before the intended prologue code, this would all be much
686 * simpler. Generic setup is done in switch_core itself.
689 /*---------------------------------------------------------------------------
690 * This actually performs the core switch.
692 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
693 __attribute__((naked));
694 static void switch_thread_core(unsigned int core, struct thread_entry *thread)
696 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
697 * Stack access also isn't permitted until restoring the original stack and
698 * context. */
699 asm volatile (
700 "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */
701 "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
702 "ldr r2, [r2, r0, lsl #2] \n"
703 "add r2, r2, %0*4 \n"
704 "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
705 "mov sp, r2 \n" /* switch stacks */
706 "adr r2, 1f \n" /* r2 = new core restart address */
707 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
708 "mov r0, r1 \n" /* switch_thread(thread) */
709 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
710 "1: \n"
711 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
712 "mov r1, #0 \n" /* Clear start address */
713 "str r1, [r0, #40] \n"
714 "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */
715 "mov lr, pc \n"
716 "bx r0 \n"
717 "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */
718 ".ltorg \n" /* Dump constant pool */
719 : : "i"(IDLE_STACK_WORDS)
721 (void)core; (void)thread;
723 #endif /* NUM_CORES */
725 #elif CONFIG_CPU == S3C2440
727 /*---------------------------------------------------------------------------
728 * Put core in a power-saving state if waking list wasn't repopulated.
729 *---------------------------------------------------------------------------
731 static inline void core_sleep(void)
733 /* FIQ also changes the CLKCON register so FIQ must be disabled
734 when changing it here */
735 asm volatile (
736 "mrs r0, cpsr \n" /* Prepare IRQ, FIQ enable */
737 "bic r0, r0, #0xc0 \n"
738 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
739 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
740 "orr r2, r2, #4 \n"
741 "str r2, [r1, #0xc] \n"
742 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
743 "mov r2, #0 \n" /* wait for IDLE */
744 "1: \n"
745 "add r2, r2, #1 \n"
746 "cmp r2, #10 \n"
747 "bne 1b \n"
748 "orr r2, r0, #0xc0 \n" /* Disable IRQ, FIQ */
749 "msr cpsr_c, r2 \n"
750 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
751 "bic r2, r2, #4 \n"
752 "str r2, [r1, #0xc] \n"
753 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
754 : : : "r0", "r1", "r2");
756 #elif defined(CPU_TCC77X)
757 static inline void core_sleep(void)
759 #warning TODO: Implement core_sleep
761 #elif CONFIG_CPU == IMX31L
762 static inline void core_sleep(void)
764 asm volatile (
765 "mov r0, #0 \n"
766 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
767 "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */
768 "bic r0, r0, #0xc0 \n"
769 "msr cpsr_c, r0 \n"
770 : : : "r0"
773 #else
774 static inline void core_sleep(void)
776 #warning core_sleep not implemented, battery life will be decreased
778 #endif /* CONFIG_CPU == */
780 #elif defined(CPU_COLDFIRE)
781 /*---------------------------------------------------------------------------
782 * Start the thread running and terminate it if it returns
783 *---------------------------------------------------------------------------
785 void start_thread(void); /* Provide C access to ASM label */
786 static void __start_thread(void) __attribute__((used));
787 static void __start_thread(void)
789 /* a0=macsr, a1=context */
790 asm volatile (
791 "start_thread: \n" /* Start here - no naked attribute */
792 "move.l %a0, %macsr \n" /* Set initial mac status reg */
793 "lea.l 48(%a1), %a1 \n"
794 "move.l (%a1)+, %sp \n" /* Set initial stack */
795 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
796 "clr.l (%a1) \n" /* Mark thread running */
797 "jsr (%a2) \n" /* Call thread function */
798 "clr.l -(%sp) \n" /* remove_thread(NULL) */
799 "jsr remove_thread \n"
803 /* Set EMAC unit to fractional mode with saturation for each new thread,
804 * since that's what'll be the most useful for most things which the dsp
805 * will do. Codecs should still initialize their preferred modes
806 * explicitly. Context pointer is placed in d2 slot and start_thread
807 * pointer in d3 slot. thread function pointer is placed in context.start.
808 * See load_context for what happens when thread is initially going to
809 * run.
811 #define THREAD_STARTUP_INIT(core, thread, function) \
812 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
813 (thread)->context.d[0] = (unsigned int)&(thread)->context, \
814 (thread)->context.d[1] = (unsigned int)start_thread, \
815 (thread)->context.start = (void *)(function); })
817 /*---------------------------------------------------------------------------
818 * Store non-volatile context.
819 *---------------------------------------------------------------------------
821 static inline void store_context(void* addr)
823 asm volatile (
824 "move.l %%macsr,%%d0 \n"
825 "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
826 : : "a" (addr) : "d0" /* only! */
830 /*---------------------------------------------------------------------------
831 * Load non-volatile context.
832 *---------------------------------------------------------------------------
834 static inline void load_context(const void* addr)
836 asm volatile (
837 "move.l 52(%0), %%d0 \n" /* Get start address */
838 "beq.b 1f \n" /* NULL -> already running */
839 "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
840 "jmp (%%a2) \n" /* Start the thread */
841 "1: \n"
842 "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
843 "move.l %%d0, %%macsr \n"
844 : : "a" (addr) : "d0" /* only! */
848 /*---------------------------------------------------------------------------
849 * Put core in a power-saving state if waking list wasn't repopulated.
850 *---------------------------------------------------------------------------
852 static inline void core_sleep(void)
854 /* Supervisor mode, interrupts enabled upon wakeup */
855 asm volatile ("stop #0x2000");
858 #elif CONFIG_CPU == SH7034
859 /*---------------------------------------------------------------------------
860 * Start the thread running and terminate it if it returns
861 *---------------------------------------------------------------------------
863 void start_thread(void); /* Provide C access to ASM label */
864 static void __start_thread(void) __attribute__((used));
865 static void __start_thread(void)
867 /* r8 = context */
868 asm volatile (
869 "_start_thread: \n" /* Start here - no naked attribute */
870 "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
871 "mov.l @(28, r8), r15 \n" /* Set initial sp */
872 "mov #0, r1 \n" /* Start the thread */
873 "jsr @r0 \n"
874 "mov.l r1, @(36, r8) \n" /* Clear start address */
875 "mov.l 1f, r0 \n" /* remove_thread(NULL) */
876 "jmp @r0 \n"
877 "mov #0, r4 \n"
878 "1: \n"
879 ".long _remove_thread \n"
883 /* Place context pointer in r8 slot, function pointer in r9 slot, and
884 * start_thread pointer in context_start */
885 #define THREAD_STARTUP_INIT(core, thread, function) \
886 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
887 (thread)->context.r[1] = (unsigned int)(function), \
888 (thread)->context.start = (void*)start_thread; })
890 /*---------------------------------------------------------------------------
891 * Store non-volatile context.
892 *---------------------------------------------------------------------------
894 static inline void store_context(void* addr)
896 asm volatile (
897 "add #36, %0 \n" /* Start at last reg. By the time routine */
898 "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
899 "mov.l r15,@-%0 \n"
900 "mov.l r14,@-%0 \n"
901 "mov.l r13,@-%0 \n"
902 "mov.l r12,@-%0 \n"
903 "mov.l r11,@-%0 \n"
904 "mov.l r10,@-%0 \n"
905 "mov.l r9, @-%0 \n"
906 "mov.l r8, @-%0 \n"
907 : : "r" (addr)
911 /*---------------------------------------------------------------------------
912 * Load non-volatile context.
913 *---------------------------------------------------------------------------
915 static inline void load_context(const void* addr)
917 asm volatile (
918 "mov.l @(36, %0), r0 \n" /* Get start address */
919 "tst r0, r0 \n"
920 "bt .running \n" /* NULL -> already running */
921 "jmp @r0 \n" /* r8 = context */
922 ".running: \n"
923 "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
924 "mov.l @%0+, r9 \n"
925 "mov.l @%0+, r10 \n"
926 "mov.l @%0+, r11 \n"
927 "mov.l @%0+, r12 \n"
928 "mov.l @%0+, r13 \n"
929 "mov.l @%0+, r14 \n"
930 "mov.l @%0+, r15 \n"
931 "lds.l @%0+, pr \n"
932 : : "r" (addr) : "r0" /* only! */
936 /*---------------------------------------------------------------------------
937 * Put core in a power-saving state if waking list wasn't repopulated.
938 *---------------------------------------------------------------------------
940 static inline void core_sleep(void)
942 asm volatile (
943 "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
944 "mov #0, r1 \n" /* Enable interrupts */
945 "ldc r1, sr \n" /* Following instruction cannot be interrupted */
946 "sleep \n" /* Execute standby */
947 : : "z"(&SBYCR-GBR) : "r1");
950 #endif /* CONFIG_CPU == */
953 * End Processor-specific section
954 ***************************************************************************/
956 #if THREAD_EXTRA_CHECKS
957 static void thread_panicf(const char *msg, struct thread_entry *thread)
959 #if NUM_CORES > 1
960 const unsigned int core = thread->core;
961 #endif
962 static char name[32];
963 thread_get_name(name, 32, thread);
964 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
966 static void thread_stkov(struct thread_entry *thread)
968 thread_panicf("Stkov", thread);
970 #define THREAD_PANICF(msg, thread) \
971 thread_panicf(msg, thread)
972 #define THREAD_ASSERT(exp, msg, thread) \
973 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
974 #else
975 static void thread_stkov(struct thread_entry *thread)
977 #if NUM_CORES > 1
978 const unsigned int core = thread->core;
979 #endif
980 static char name[32];
981 thread_get_name(name, 32, thread);
982 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
984 #define THREAD_PANICF(msg, thread)
985 #define THREAD_ASSERT(exp, msg, thread)
986 #endif /* THREAD_EXTRA_CHECKS */
988 /*---------------------------------------------------------------------------
989 * Lock a list pointer and returns its value
990 *---------------------------------------------------------------------------
992 #if CONFIG_CORELOCK == SW_CORELOCK
993 /* Separate locking function versions */
995 /* Thread locking */
996 #define GET_THREAD_STATE(thread) \
997 ({ corelock_lock(&(thread)->cl); (thread)->state; })
998 #define TRY_GET_THREAD_STATE(thread) \
999 ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
1000 #define UNLOCK_THREAD(thread, state) \
1001 ({ corelock_unlock(&(thread)->cl); })
1002 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1003 ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })
1005 /* List locking */
1006 #define LOCK_LIST(tqp) \
1007 ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
1008 #define UNLOCK_LIST(tqp, mod) \
1009 ({ corelock_unlock(&(tqp)->cl); })
1010 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1011 ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })
1013 /* Select the queue pointer directly */
1014 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1015 ({ add_to_list_l(&(tqp)->queue, (thread)); })
1016 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1017 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
1019 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1020 /* Native swap/exchange versions */
1022 /* Thread locking */
1023 #define GET_THREAD_STATE(thread) \
1024 ({ unsigned _s; \
1025 while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
1026 _s; })
1027 #define TRY_GET_THREAD_STATE(thread) \
1028 ({ xchg8(&(thread)->state, STATE_BUSY); })
1029 #define UNLOCK_THREAD(thread, _state) \
1030 ({ (thread)->state = (_state); })
1031 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1032 ({ (thread)->state = (_state); })
1034 /* List locking */
1035 #define LOCK_LIST(tqp) \
1036 ({ struct thread_entry *_l; \
1037 while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
1038 _l; })
1039 #define UNLOCK_LIST(tqp, mod) \
1040 ({ (tqp)->queue = (mod); })
1041 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1042 ({ (tqp)->queue = (mod); })
1044 /* Select the local queue pointer copy returned from LOCK_LIST */
1045 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1046 ({ add_to_list_l(&(tc), (thread)); })
1047 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1048 ({ remove_from_list_l(&(tc), (thread)); })
1050 #else
1051 /* Single-core/non-locked versions */
1053 /* Threads */
1054 #define GET_THREAD_STATE(thread) \
1055 ({ (thread)->state; })
1056 #define UNLOCK_THREAD(thread, _state)
1057 #define UNLOCK_THREAD_SET_STATE(thread, _state) \
1058 ({ (thread)->state = (_state); })
1060 /* Lists */
1061 #define LOCK_LIST(tqp) \
1062 ({ (tqp)->queue; })
1063 #define UNLOCK_LIST(tqp, mod)
1064 #define UNLOCK_LIST_SET_PTR(tqp, mod) \
1065 ({ (tqp)->queue = (mod); })
1067 /* Select the queue pointer directly */
1068 #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1069 ({ add_to_list_l(&(tqp)->queue, (thread)); })
1070 #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1071 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
1073 #endif /* locking selection */
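/* Sketch of how the macros above are typically combined (illustrative only;
 * peek_thread_state() just below is a concrete in-tree example of the same
 * lock/read/unlock pattern):
 *
 *   unsigned state = GET_THREAD_STATE(thread);   // lock slot, read state
 *   ...examine or re-queue the thread here...
 *   UNLOCK_THREAD(thread, state);                // release, state unchanged
 *   // or: UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING) to change it
 */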
1075 #if THREAD_EXTRA_CHECKS
1076 /*---------------------------------------------------------------------------
1077 * Lock the thread slot to obtain the state and then unlock it. Waits for
1078 * it not to be busy. Used for debugging.
1079 *---------------------------------------------------------------------------
1081 static unsigned peek_thread_state(struct thread_entry *thread)
1083 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1084 unsigned state = GET_THREAD_STATE(thread);
1085 UNLOCK_THREAD(thread, state);
1086 set_irq_level(oldlevel);
1087 return state;
1089 #endif /* THREAD_EXTRA_CHECKS */
1091 /*---------------------------------------------------------------------------
1092  * Adds a thread to a list of threads using "insert last". Uses the "l"
1093 * links.
1094 *---------------------------------------------------------------------------
1096 static void add_to_list_l(struct thread_entry **list,
1097 struct thread_entry *thread)
1099 struct thread_entry *l = *list;
1101 if (l == NULL)
1103 /* Insert into unoccupied list */
1104 thread->l.next = thread;
1105 thread->l.prev = thread;
1106 *list = thread;
1107 return;
1110 /* Insert last */
1111 thread->l.next = l;
1112 thread->l.prev = l->l.prev;
1113 thread->l.prev->l.next = thread;
1114 l->l.prev = thread;
1116 /* Insert next
1117 thread->l.next = l->l.next;
1118 thread->l.prev = l;
1119 thread->l.next->l.prev = thread;
1120 l->l.next = thread;
1124 /*---------------------------------------------------------------------------
1125 * Locks a list, adds the thread entry and unlocks the list on multicore.
1126 * Defined as add_to_list_l on single-core.
1127 *---------------------------------------------------------------------------
1129 #if NUM_CORES > 1
1130 static void add_to_list_l_locked(struct thread_queue *tq,
1131 struct thread_entry *thread)
1133 struct thread_entry *t = LOCK_LIST(tq);
1134 ADD_TO_LIST_L_SELECT(t, tq, thread);
1135 UNLOCK_LIST(tq, t);
1136 (void)t;
1138 #else
1139 #define add_to_list_l_locked(tq, thread) \
1140 add_to_list_l(&(tq)->queue, (thread))
1141 #endif
1143 /*---------------------------------------------------------------------------
1144 * Removes a thread from a list of threads. Uses the "l" links.
1145 *---------------------------------------------------------------------------
1147 static void remove_from_list_l(struct thread_entry **list,
1148 struct thread_entry *thread)
1150 struct thread_entry *prev, *next;
1152 next = thread->l.next;
1154 if (thread == next)
1156 /* The only item */
1157 *list = NULL;
1158 return;
1161 if (thread == *list)
1163 /* List becomes next item */
1164 *list = next;
1167 prev = thread->l.prev;
1169 /* Fix links to jump over the removed entry. */
1170 prev->l.next = next;
1171 next->l.prev = prev;
1174 /*---------------------------------------------------------------------------
1175 * Locks a list, removes the thread entry and unlocks the list on multicore.
1176 * Defined as remove_from_list_l on single-core.
1177 *---------------------------------------------------------------------------
1179 #if NUM_CORES > 1
1180 static void remove_from_list_l_locked(struct thread_queue *tq,
1181 struct thread_entry *thread)
1183 struct thread_entry *t = LOCK_LIST(tq);
1184 REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
1185 UNLOCK_LIST(tq, t);
1186 (void)t;
1188 #else
1189 #define remove_from_list_l_locked(tq, thread) \
1190 remove_from_list_l(&(tq)->queue, (thread))
1191 #endif
1193 /*---------------------------------------------------------------------------
1194  * Add a thread to the core's timeout list by linking the pointers in its
1195 * tmo structure.
1196 *---------------------------------------------------------------------------
1198 static void add_to_list_tmo(struct thread_entry *thread)
1200 /* Insert first */
1201 struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;
1203 thread->tmo.prev = thread;
1204 thread->tmo.next = t;
1206 if (t != NULL)
1208 /* Fix second item's prev pointer to point to this thread */
1209 t->tmo.prev = thread;
1212 cores[IF_COP_CORE(thread->core)].timeout = thread;
1215 /*---------------------------------------------------------------------------
1216  * Remove a thread from the core's timeout list by unlinking the pointers in
1217 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
1218 * is cancelled.
1219 *---------------------------------------------------------------------------
1221 static void remove_from_list_tmo(struct thread_entry *thread)
1223 struct thread_entry *next = thread->tmo.next;
1224 struct thread_entry *prev;
1226 if (thread == cores[IF_COP_CORE(thread->core)].timeout)
1228 /* Next item becomes list head */
1229 cores[IF_COP_CORE(thread->core)].timeout = next;
1231 if (next != NULL)
1233 /* Fix new list head's prev to point to itself. */
1234 next->tmo.prev = next;
1237 thread->tmo.prev = NULL;
1238 return;
1241 prev = thread->tmo.prev;
1243 if (next != NULL)
1245 next->tmo.prev = prev;
1248 prev->tmo.next = next;
1249 thread->tmo.prev = NULL;
1252 /*---------------------------------------------------------------------------
1253 * Schedules a thread wakeup on the specified core. Threads will be made
1254 * ready to run when the next task switch occurs. Note that this does not
1255 * introduce an on-core delay since the soonest the next thread may run is
1256 * no sooner than that. Other cores and on-core interrupts may only ever
1257 * add to the list.
1258 *---------------------------------------------------------------------------
1260 static void core_schedule_wakeup(struct thread_entry *thread)
1262 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1263 const unsigned int core = IF_COP_CORE(thread->core);
1264 add_to_list_l_locked(&cores[core].waking, thread);
1265 #if NUM_CORES > 1
1266 if (core != CURRENT_CORE)
1268 core_wake(core);
1270 #endif
1271 set_irq_level(oldlevel);
1274 /*---------------------------------------------------------------------------
1275 * If the waking list was populated, move all threads on it onto the running
1276 * list so they may be run ASAP.
1277 *---------------------------------------------------------------------------
1279 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
1281 struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
1282 struct thread_entry *r = cores[IF_COP_CORE(core)].running;
1284 /* Transfer all threads on the waking list to the running list in one
1285 swoop */
1286 if (r != NULL)
1288 /* Place waking threads at the end of the running list. */
1289 struct thread_entry *tmp;
1290 w->l.prev->l.next = r;
1291 r->l.prev->l.next = w;
1292 tmp = r->l.prev;
1293 r->l.prev = w->l.prev;
1294 w->l.prev = tmp;
1296 else
1298 /* Just transfer the list as-is */
1299 cores[IF_COP_CORE(core)].running = w;
1301 /* Just leave any timeout threads on the timeout list. If a timeout check
1302 * is due, they will be removed there. If they do a timeout again before
1303 * being removed, they will just stay on the list with a new expiration
1304 * tick. */
1306 /* Waking list is clear - NULL and unlock it */
1307 UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
1310 /*---------------------------------------------------------------------------
1311 * Check the core's timeout list when at least one thread is due to wake.
1312 * Filtering for the condition is done before making the call. Resets the
1313 * tick when the next check will occur.
1314 *---------------------------------------------------------------------------
1316 static void check_tmo_threads(void)
1318 const unsigned int core = CURRENT_CORE;
1319 const long tick = current_tick; /* snapshot the current tick */
1320 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1321 struct thread_entry *next = cores[core].timeout;
1323 /* If there are no processes waiting for a timeout, just keep the check
1324 tick from falling into the past. */
1325 if (next != NULL)
1327 /* Check sleeping threads. */
1330 /* Must make sure no one else is examining the state; wait until
1331 slot is no longer busy */
1332 struct thread_entry *curr = next;
1333 next = curr->tmo.next;
1335 unsigned state = GET_THREAD_STATE(curr);
1337 if (state < TIMEOUT_STATE_FIRST)
1339 /* Cleanup threads no longer on a timeout but still on the
1340 * list. */
1341 remove_from_list_tmo(curr);
1342 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1344 else if (TIME_BEFORE(tick, curr->tmo_tick))
1346 /* Timeout still pending - this will be the usual case */
1347 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1349 /* Earliest timeout found so far - move the next check up
1350 to its time */
1351 next_tmo_check = curr->tmo_tick;
1353 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1355 else
1357 /* Sleep timeout has been reached so bring the thread back to
1358 * life again. */
1359 if (state == STATE_BLOCKED_W_TMO)
1361 remove_from_list_l_locked(curr->bqp, curr);
1364 remove_from_list_tmo(curr);
1365 add_to_list_l(&cores[core].running, curr);
1366 UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
1369 /* Break the loop once we have walked through the list of all
1370 * sleeping processes or have removed them all. */
1372 while (next != NULL);
1375 cores[core].next_tmo_check = next_tmo_check;
1378 /*---------------------------------------------------------------------------
1379 * Performs operations that must be done before blocking a thread but after
1380 * the state is saved - follows reverse of locking order. blk_ops.flags is
1381 * assumed to be nonzero.
1382 *---------------------------------------------------------------------------
1384 #if NUM_CORES > 1
1385 static inline void run_blocking_ops(
1386 unsigned int core, struct thread_entry *thread)
1388 struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
1389 const unsigned flags = ops->flags;
1391 if (flags == 0)
1392 return;
1394 if (flags & TBOP_SWITCH_CORE)
1396 core_switch_blk_op(core, thread);
1399 #if CONFIG_CORELOCK == SW_CORELOCK
1400 if (flags & TBOP_UNLOCK_LIST)
1402 UNLOCK_LIST(ops->list_p, NULL);
1405 if (flags & TBOP_UNLOCK_CORELOCK)
1407 corelock_unlock(ops->cl_p);
1410 if (flags & TBOP_UNLOCK_THREAD)
1412 UNLOCK_THREAD(ops->thread, 0);
1414 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1415 /* Write updated variable value into memory location */
1416 switch (flags & TBOP_VAR_TYPE_MASK)
1418 case TBOP_UNLOCK_LIST:
1419 UNLOCK_LIST(ops->list_p, ops->list_v);
1420 break;
1421 case TBOP_SET_VARi:
1422 *ops->var_ip = ops->var_iv;
1423 break;
1424 case TBOP_SET_VARu8:
1425 *ops->var_u8p = ops->var_u8v;
1426 break;
1428 #endif /* CONFIG_CORELOCK == */
1430 /* Unlock thread's slot */
1431 if (flags & TBOP_UNLOCK_CURRENT)
1433 UNLOCK_THREAD(thread, ops->state);
1436 ops->flags = 0;
1438 #endif /* NUM_CORES > 1 */
1441 /*---------------------------------------------------------------------------
1442 * Runs any operations that may cause threads to be ready to run and then
1443 * sleeps the processor core until the next interrupt if none are.
1444 *---------------------------------------------------------------------------
1446 static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
1448 for (;;)
1450 set_irq_level(HIGHEST_IRQ_LEVEL);
1451 /* We want to do these ASAP as they may change the decision to sleep
1452 * the core, or the core may have woken because an interrupt occurred
1453 * and posted a message to a queue. */
1454 if (cores[IF_COP_CORE(core)].waking.queue != NULL)
1456 core_perform_wakeup(IF_COP(core));
1459 /* If there are threads on a timeout and the earliest wakeup is due,
1460 * check the list and wake any threads that need to start running
1461 * again. */
1462 if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
1464 check_tmo_threads();
1467 /* If there is a ready to run task, return its ID and keep core
1468 * awake. */
1469 if (cores[IF_COP_CORE(core)].running == NULL)
1471 /* Enter sleep mode to reduce power usage - woken up on interrupt
1472 * or wakeup request from another core - expected to enable all
1473 * interrupts. */
1474 core_sleep(IF_COP(core));
1475 continue;
1478 set_irq_level(0);
1479 return cores[IF_COP_CORE(core)].running;
1483 #ifdef RB_PROFILE
1484 void profile_thread(void)
1486 profstart(cores[CURRENT_CORE].running - threads);
1488 #endif
1490 /*---------------------------------------------------------------------------
1491 * Prepares a thread to block on an object's list and/or for a specified
1492 * duration - expects object and slot to be appropriately locked if needed.
1493 *---------------------------------------------------------------------------
1495 static inline void _block_thread_on_l(struct thread_queue *list,
1496 struct thread_entry *thread,
1497 unsigned state
1498 IF_SWCL(, const bool nolock))
1500 /* If inlined, unreachable branches will be pruned with no size penalty
1501 because constant params are used for state and nolock. */
1502 const unsigned int core = IF_COP_CORE(thread->core);
1504 /* Remove the thread from the list of running threads. */
1505 remove_from_list_l(&cores[core].running, thread);
1507 /* Add a timeout to the block if not infinite */
1508 switch (state)
1510 case STATE_BLOCKED:
1511 /* Put the thread into a new list of inactive threads. */
1512 #if CONFIG_CORELOCK == SW_CORELOCK
1513 if (nolock)
1515 thread->bqp = NULL; /* Indicate nolock list */
1516 thread->bqnlp = (struct thread_entry **)list;
1517 add_to_list_l((struct thread_entry **)list, thread);
1519 else
1520 #endif
1522 thread->bqp = list;
1523 add_to_list_l_locked(list, thread);
1525 break;
1526 case STATE_BLOCKED_W_TMO:
1527 /* Put the thread into a new list of inactive threads. */
1528 #if CONFIG_CORELOCK == SW_CORELOCK
1529 if (nolock)
1531 thread->bqp = NULL; /* Indicate nolock list */
1532 thread->bqnlp = (struct thread_entry **)list;
1533 add_to_list_l((struct thread_entry **)list, thread);
1535 else
1536 #endif
1538 thread->bqp = list;
1539 add_to_list_l_locked(list, thread);
1541 /* Fall-through */
1542 case STATE_SLEEPING:
1543 /* If this thread times out sooner than any other thread, update
1544 next_tmo_check to its timeout */
1545 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1547 cores[core].next_tmo_check = thread->tmo_tick;
1550 if (thread->tmo.prev == NULL)
1552 add_to_list_tmo(thread);
1554 /* else thread was never removed from list - just keep it there */
1555 break;
1558 #ifdef HAVE_PRIORITY_SCHEDULING
1559 /* Reset priorities */
1560 if (thread->priority == cores[core].highest_priority)
1561 cores[core].highest_priority = LOWEST_PRIORITY;
1562 #endif
1564 #if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
1565 /* Safe to set state now */
1566 thread->state = state;
1567 #elif CONFIG_CORELOCK == CORELOCK_SWAP
1568 cores[core].blk_ops.state = state;
1569 #endif
1571 #if NUM_CORES > 1
1572 /* Delay slot unlock until task switch */
1573 cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
1574 #endif
1577 static inline void block_thread_on_l(
1578 struct thread_queue *list, struct thread_entry *thread, unsigned state)
1580 _block_thread_on_l(list, thread, state IF_SWCL(, false));
1583 static inline void block_thread_on_l_no_listlock(
1584 struct thread_entry **list, struct thread_entry *thread, unsigned state)
1586 _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
1589 /*---------------------------------------------------------------------------
1590 * Switch thread in round robin fashion for any given priority. Any thread
1591 * that removed itself from the running list first must specify itself in
1592  * the parameter.
1594 * INTERNAL: Intended for use by kernel and not for programs.
1595 *---------------------------------------------------------------------------
1597 void switch_thread(struct thread_entry *old)
1599 const unsigned int core = CURRENT_CORE;
1600 struct thread_entry *thread = cores[core].running;
1601 struct thread_entry *block = old;
1603 if (block == NULL)
1604 old = thread;
1606 #ifdef RB_PROFILE
1607 profile_thread_stopped(old - threads);
1608 #endif
1610 /* Begin task switching by saving our current context so that we can
1611 * restore the state of the current thread later to the point prior
1612 * to this call. */
1613 store_context(&old->context);
1615 /* Check if the current thread's stack has overflowed */
1616 if(((unsigned int *)old->stack)[0] != DEADBEEF)
1617 thread_stkov(old);
1619 #if NUM_CORES > 1
1620 /* Run any blocking operations requested before switching/sleeping */
1621 run_blocking_ops(core, old);
1622 #endif
1624 /* Go through the list of sleeping tasks to check if we need to wake up
1625 * any of them due to timeout. Also puts core into sleep state until
1626 * there is at least one running process again. */
1627 thread = sleep_core(IF_COP(core));
1629 #ifdef HAVE_PRIORITY_SCHEDULING
1630 /* Select the new task based on priorities and the last time a process
1631 * got CPU time. */
1632 if (block == NULL)
1633 thread = thread->l.next;
1635 for (;;)
1637 int priority = thread->priority;
1639 if (priority < cores[core].highest_priority)
1640 cores[core].highest_priority = priority;
1642 if (priority == cores[core].highest_priority ||
1643 thread->priority_x < cores[core].highest_priority ||
1644 (current_tick - thread->last_run > priority * 8))
1646 cores[core].running = thread;
1647 break;
1650 thread = thread->l.next;
1653 /* Reset the value of thread's last running time to the current time. */
1654 thread->last_run = current_tick;
1655 #else
1656 if (block == NULL)
1658 thread = thread->l.next;
1659 cores[core].running = thread;
1661 #endif /* HAVE_PRIORITY_SCHEDULING */
1663 /* And finally give control to the next thread. */
1664 load_context(&thread->context);
1666 #ifdef RB_PROFILE
1667 profile_thread_started(thread - threads);
1668 #endif
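/* Illustrative note (not part of the original code): with
 * HAVE_PRIORITY_SCHEDULING the selection loop above also ages starved
 * threads. For example, a thread of priority 16 that last ran more than
 * 16*8 = 128 ticks ago satisfies
 * (current_tick - thread->last_run > priority * 8) and gets the CPU even if
 * a higher-priority thread is still runnable. Kernel code that merely wants
 * to give up the rest of its timeslice can call the scheduler directly, as
 * priority_yield() below does:
 *
 *     switch_thread(NULL);    stays runnable, lets another thread run
 */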
1671 /*---------------------------------------------------------------------------
1672  * Change the boost state of a thread, boosting or unboosting the CPU
1673  * as required. Requires the thread slot to be locked first.
1674 *---------------------------------------------------------------------------
1676 static inline void boost_thread(struct thread_entry *thread, bool boost)
1678 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1679 if ((thread->boosted != 0) != boost)
1681 thread->boosted = boost;
1682 cpu_boost(boost);
1684 #endif
1685 (void)thread; (void)boost;
1688 /*---------------------------------------------------------------------------
1689  * Sleeps a thread for a specified number of ticks and unboosts the thread
1690 * if it is boosted. If ticks is zero, it does not delay but instead switches
1691 * tasks.
1693 * INTERNAL: Intended for use by kernel and not for programs.
1694 *---------------------------------------------------------------------------
1696 void sleep_thread(int ticks)
1698 /* Get the entry for the current running thread. */
1699 struct thread_entry *current = cores[CURRENT_CORE].running;
1701 #if NUM_CORES > 1
1702 /* Lock thread slot */
1703 GET_THREAD_STATE(current);
1704 #endif
1706 /* Set our timeout, change lists, and finally switch threads.
1707  * Unlock during switch on multicore. */
1708 current->tmo_tick = current_tick + ticks + 1;
1709 block_thread_on_l(NULL, current, STATE_SLEEPING);
1710 switch_thread(current);
1712 /* Our status should be STATE_RUNNING */
1713 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1714 "S:R->!*R", current);
1717 /*---------------------------------------------------------------------------
1718 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1719 * Caller with interrupt-accessible lists should disable interrupts first
1720 * and request a BOP_IRQ_LEVEL blocking operation to reset it.
1722 * INTERNAL: Intended for use by kernel objects and not for programs.
1723 *---------------------------------------------------------------------------
1725 IF_SWCL(static inline) void _block_thread(struct thread_queue *list
1726 IF_SWCL(, const bool nolock))
1728 /* Get the entry for the current running thread. */
1729 struct thread_entry *current = cores[CURRENT_CORE].running;
1731 /* Set the state to blocked and ask the scheduler to switch tasks,
1732 * this takes us off of the run queue until we are explicitly woken */
1734 #if NUM_CORES > 1
1735 /* Lock thread slot */
1736 GET_THREAD_STATE(current);
1737 #endif
1739 #if CONFIG_CORELOCK == SW_CORELOCK
1740 /* One branch optimized away during inlining */
1741 if (nolock)
1743 block_thread_on_l_no_listlock((struct thread_entry **)list,
1744 current, STATE_BLOCKED);
1746 else
1747 #endif
1749 block_thread_on_l(list, current, STATE_BLOCKED);
1752 switch_thread(current);
1754 /* Our status should be STATE_RUNNING */
1755 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1756 "B:R->!*R", current);
1759 #if CONFIG_CORELOCK == SW_CORELOCK
1760 /* Inline lock/nolock version of _block_thread into these functions */
1761 void block_thread(struct thread_queue *tq)
1763 _block_thread(tq, false);
1766 void block_thread_no_listlock(struct thread_entry **list)
1768 _block_thread((struct thread_queue *)list, true);
1770 #endif /* CONFIG_CORELOCK */
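/* Pairing sketch (illustrative only): a kernel object typically embeds a
 * struct thread_queue and uses block_thread()/wakeup_thread() as the two
 * halves of a wait; the object itself must serialize the two calls. "obj"
 * is a hypothetical kernel object - the real callers are the kernel's
 * synchronization objects:
 *
 *     block_thread(&obj->queue);      waiter side: returns once woken
 *
 *     wakeup_thread(&obj->queue);     waker side: wakes one blocked thread
 */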
1772 /*---------------------------------------------------------------------------
1773 * Block a thread on a blocking queue for a specified time interval or until
1774 * explicitly woken - whichever happens first.
1775 * Caller with interrupt-accessible lists should disable interrupts first
1776 * and request that interrupt level be restored after switching out the
1777 * current thread.
1779 * INTERNAL: Intended for use by kernel objects and not for programs.
1780 *---------------------------------------------------------------------------
1782 void block_thread_w_tmo(struct thread_queue *list, int timeout)
1784 /* Get the entry for the current running thread. */
1785 struct thread_entry *current = cores[CURRENT_CORE].running;
1787 #if NUM_CORES > 1
1788 /* Lock thread slot */
1789 GET_THREAD_STATE(current);
1790 #endif
1792 /* Set the state to blocked with the specified timeout */
1793 current->tmo_tick = current_tick + timeout;
1794 /* Set the list for explicit wakeup */
1795 block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);
1797 /* Now force a task switch and block until we have been woken up
1798 * by another thread or timeout is reached - whichever happens first */
1799 switch_thread(current);
1801 /* Our status should be STATE_RUNNING */
1802 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1803 "T:R->!*R", current);
1806 /*---------------------------------------------------------------------------
1807  * Explicitly wake up a thread on a blocking queue. Has no effect on threads
1808 * that called sleep().
1809 * Caller with interrupt-accessible lists should disable interrupts first.
1810 * This code should be considered a critical section by the caller.
1812 * INTERNAL: Intended for use by kernel objects and not for programs.
1813 *---------------------------------------------------------------------------
1815 IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
1816 struct thread_queue *list IF_SWCL(, const bool nolock))
1818 struct thread_entry *t;
1819 struct thread_entry *thread;
1820 unsigned state;
1822 /* Wake up the last thread first. */
1823 #if CONFIG_CORELOCK == SW_CORELOCK
1824 /* One branch optimized away during inlining */
1825 if (nolock)
1827 t = list->queue;
1829 else
1830 #endif
1832 t = LOCK_LIST(list);
1835 /* Check if there is a blocked thread at all. */
1836 if (t == NULL)
1838 #if CONFIG_CORELOCK == SW_CORELOCK
1839 if (!nolock)
1840 #endif
1842 UNLOCK_LIST(list, NULL);
1844 return NULL;
1847 thread = t;
1849 #if NUM_CORES > 1
1850 #if CONFIG_CORELOCK == SW_CORELOCK
1851 if (nolock)
1853 /* Lock thread only, not list */
1854 state = GET_THREAD_STATE(thread);
1856 else
1857 #endif
1859 /* This locks in reverse order from other routines so a retry in the
1860 correct order may be needed */
1861 state = TRY_GET_THREAD_STATE(thread);
1862 if (state == STATE_BUSY)
1864 /* Unlock list and retry slot, then list */
1865 UNLOCK_LIST(list, t);
1866 state = GET_THREAD_STATE(thread);
1867 t = LOCK_LIST(list);
1868 /* Be sure thread still exists here - it couldn't have re-added
1869 itself if it was woken elsewhere because this function is
1870 serialized within the object that owns the list. */
1871 if (thread != t)
1873 /* Thread disappeared :( */
1874 UNLOCK_LIST(list, t);
1875 UNLOCK_THREAD(thread, state);
1876 return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
1880 #else /* NUM_CORES == 1 */
1881 state = GET_THREAD_STATE(thread);
1882 #endif /* NUM_CORES */
1884 /* Determine thread's current state. */
1885 switch (state)
1887 case STATE_BLOCKED:
1888 case STATE_BLOCKED_W_TMO:
1889 /* Remove thread from object's blocked list - select t or list depending
1890 on locking type at compile time */
1891 REMOVE_FROM_LIST_L_SELECT(t, list, thread);
1892 #if CONFIG_CORELOCK == SW_CORELOCK
1893 /* Statement optimized away during inlining if nolock != false */
1894 if (!nolock)
1895 #endif
1897 UNLOCK_LIST(list, t); /* Unlock list - removal complete */
1900 #ifdef HAVE_PRIORITY_SCHEDULING
1901 /* Give the task a kick to avoid a stall after wakeup.
1902 Not really proper treatment - TODO later. */
1903 thread->last_run = current_tick - 8*LOWEST_PRIORITY;
1904 #endif
1905 core_schedule_wakeup(thread);
1906 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
1907 return thread;
1908 default:
1909 /* Nothing to do. State is not blocked. */
1910 #if THREAD_EXTRA_CHECKS
1911 THREAD_PANICF("wakeup_thread->block invalid", thread);
1912 case STATE_RUNNING:
1913 case STATE_KILLED:
1914 #endif
1915 #if CONFIG_CORELOCK == SW_CORELOCK
1916 /* Statement optimized away during inlining if nolock != false */
1917 if (!nolock)
1918 #endif
1920 UNLOCK_LIST(list, t); /* Unlock the object's list */
1922 UNLOCK_THREAD(thread, state); /* Unlock thread slot */
1923 return NULL;
1927 #if CONFIG_CORELOCK == SW_CORELOCK
1928 /* Inline lock/nolock version of _wakeup_thread into these functions */
1929 struct thread_entry * wakeup_thread(struct thread_queue *tq)
1931 return _wakeup_thread(tq, false);
1934 struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
1936 return _wakeup_thread((struct thread_queue *)list, true);
1938 #endif /* CONFIG_CORELOCK */
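/* Return-value sketch (illustrative only, "obj" hypothetical): callers can
 * distinguish the three outcomes of a wakeup attempt:
 *
 *     struct thread_entry *t = wakeup_thread(&obj->queue);
 *     if (t == NULL)
 *         ;   nothing was blocked, or the head thread wasn't in a blocked state
 *     else if (t == THREAD_WAKEUP_MISSING)
 *         ;   multicore race: the thread left the list while locks were retried
 *     else
 *         ;   t points to the thread that was just made runnable
 */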
1940 /*---------------------------------------------------------------------------
1941  * Find an empty thread slot, or MAXTHREADS if none is found. The slot returned
1942 * will be locked on multicore.
1943 *---------------------------------------------------------------------------
1945 static int find_empty_thread_slot(void)
1947 #if NUM_CORES > 1
1948 /* Any slot could be on an IRQ-accessible list */
1949 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1950 #endif
1951 /* Thread slots are not locked on single core */
1953 int n;
1955 for (n = 0; n < MAXTHREADS; n++)
1957 /* Obtain current slot state - lock it on multicore */
1958 unsigned state = GET_THREAD_STATE(&threads[n]);
1960 if (state == STATE_KILLED
1961 #if NUM_CORES > 1
1962 && threads[n].name != THREAD_DESTRUCT
1963 #endif
1966 /* Slot is empty - leave it locked and caller will unlock */
1967 break;
1970 /* Finished examining slot - no longer busy - unlock on multicore */
1971 UNLOCK_THREAD(&threads[n], state);
1974 #if NUM_CORES > 1
1975     set_irq_level(oldlevel); /* Re-enable interrupts - this slot is
1976                                 not accessible to them yet */
1977 #endif
1979 return n;
1983 /*---------------------------------------------------------------------------
1984 * Place the current core in idle mode - woken up on interrupt or wake
1985 * request from another core.
1986 *---------------------------------------------------------------------------
1988 void core_idle(void)
1990 #if NUM_CORES > 1
1991 const unsigned int core = CURRENT_CORE;
1992 #endif
1993 set_irq_level(HIGHEST_IRQ_LEVEL);
1994 core_sleep(IF_COP(core));
1997 /*---------------------------------------------------------------------------
1998 * Create a thread
1999 * If using a dual core architecture, specify which core to start the thread
2000  * on, and whether to fall back to the other core if it can't be created.
2001 * Return ID if context area could be allocated, else NULL.
2002 *---------------------------------------------------------------------------
2004 struct thread_entry*
2005 create_thread(void (*function)(void), void* stack, int stack_size,
2006 unsigned flags, const char *name
2007 IF_PRIO(, int priority)
2008 IF_COP(, unsigned int core))
2010 unsigned int i;
2011 unsigned int stacklen;
2012 unsigned int *stackptr;
2013 int slot;
2014 struct thread_entry *thread;
2015 unsigned state;
2017 slot = find_empty_thread_slot();
2018 if (slot >= MAXTHREADS)
2020 return NULL;
2023 /* Munge the stack to make it easy to spot stack overflows */
2024 stacklen = stack_size / sizeof(int);
2025 stackptr = stack;
2026 for(i = 0;i < stacklen;i++)
2028 stackptr[i] = DEADBEEF;
2031 /* Store interesting information */
2032 thread = &threads[slot];
2033 thread->name = name;
2034 thread->stack = stack;
2035 thread->stack_size = stack_size;
2036 thread->bqp = NULL;
2037 #if CONFIG_CORELOCK == SW_CORELOCK
2038 thread->bqnlp = NULL;
2039 #endif
2040 thread->queue = NULL;
2041 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2042 thread->boosted = 0;
2043 #endif
2044 #ifdef HAVE_PRIORITY_SCHEDULING
2045 thread->priority_x = LOWEST_PRIORITY;
2046 thread->priority = priority;
2047 thread->last_run = current_tick - priority * 8;
2048 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2049 #endif
2051 #if NUM_CORES > 1
2052 thread->core = core;
2054 /* Writeback stack munging or anything else before starting */
2055 if (core != CURRENT_CORE)
2057 flush_icache();
2059 #endif
2061     /* Thread is not on any timeout list, but be a bit paranoid */
2062 thread->tmo.prev = NULL;
2064 state = (flags & CREATE_THREAD_FROZEN) ?
2065 STATE_FROZEN : STATE_RUNNING;
2067     /* Align the initial stack pointer down to a 32-bit word boundary */
2068 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
2070 /* Load the thread's context structure with needed startup information */
2071 THREAD_STARTUP_INIT(core, thread, function);
2073 if (state == STATE_RUNNING)
2075 #if NUM_CORES > 1
2076 if (core != CURRENT_CORE)
2078 /* Next task switch on other core moves thread to running list */
2079 core_schedule_wakeup(thread);
2081 else
2082 #endif
2084 /* Place on running list immediately */
2085 add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
2089 /* remove lock and set state */
2090 UNLOCK_THREAD_SET_STATE(thread, state);
2092 return thread;
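/* Creation sketch (illustrative, names are hypothetical and DEFAULT_STACK_SIZE
 * is assumed to be the usual stack-size constant): a typical caller supplies a
 * statically allocated stack and the IF_PRIO/IF_COP argument macros so the
 * call compiles on every configuration:
 *
 *     static long my_stack[DEFAULT_STACK_SIZE/sizeof(long)];
 *
 *     struct thread_entry *t =
 *         create_thread(my_thread_func, my_stack, sizeof(my_stack), 0,
 *                       "my thread" IF_PRIO(, PRIORITY_USER_INTERFACE)
 *                       IF_COP(, CPU));
 *     if (t == NULL)
 *         ;   no free slot - handle the failure
 */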
2095 #ifdef HAVE_SCHEDULER_BOOSTCTRL
2096 void trigger_cpu_boost(void)
2098     /* No IRQ disable necessary since the current thread cannot be blocked
2099 on an IRQ-accessible list */
2100 struct thread_entry *current = cores[CURRENT_CORE].running;
2101 unsigned state;
2103 state = GET_THREAD_STATE(current);
2104 boost_thread(current, true);
2105 UNLOCK_THREAD(current, state);
2107 (void)state;
2110 void cancel_cpu_boost(void)
2112 struct thread_entry *current = cores[CURRENT_CORE].running;
2113 unsigned state;
2115 state = GET_THREAD_STATE(current);
2116 boost_thread(current, false);
2117 UNLOCK_THREAD(current, state);
2119 (void)state;
2121 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
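/* Boost sketch (illustrative only, do_heavy_work() is hypothetical): a thread
 * raises the CPU frequency for a heavy stretch of work and drops it again;
 * the boosted flag on the thread slot makes repeated calls from the same
 * thread idempotent:
 *
 *     trigger_cpu_boost();    boost while decoding/buffering
 *     do_heavy_work();
 *     cancel_cpu_boost();     unboost when done
 */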
2123 /*---------------------------------------------------------------------------
2124 * Remove a thread from the scheduler.
2125 * Parameter is the ID as returned from create_thread().
2127 * Use with care on threads that are not under careful control as this may
2128 * leave various objects in an undefined state. When trying to kill a thread
2129 * on another processor, be sure you know what it's doing and won't be
2130 * switching around itself.
2131 *---------------------------------------------------------------------------
2133 void remove_thread(struct thread_entry *thread)
2135 #if NUM_CORES > 1
2136 /* core is not constant here because of core switching */
2137 unsigned int core = CURRENT_CORE;
2138 unsigned int old_core = NUM_CORES;
2139 #else
2140 const unsigned int core = CURRENT_CORE;
2141 #endif
2142 unsigned state;
2143 int oldlevel;
2145 if (thread == NULL)
2146 thread = cores[core].running;
2148 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2149 state = GET_THREAD_STATE(thread);
2151 if (state == STATE_KILLED)
2153 goto thread_killed;
2156 #if NUM_CORES > 1
2157 if (thread->core != core)
2159 /* Switch cores and safely extract the thread there */
2160 /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
2161 condition if the thread runs away to another processor. */
2162 unsigned int new_core = thread->core;
2163 const char *old_name = thread->name;
2165 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2166 UNLOCK_THREAD(thread, state);
2167 set_irq_level(oldlevel);
2169 old_core = switch_core(new_core);
2171 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2172 state = GET_THREAD_STATE(thread);
2174 core = new_core;
2176 if (state == STATE_KILLED)
2178 /* Thread suicided before we could kill it */
2179 goto thread_killed;
2182 /* Reopen slot - it's locked again anyway */
2183 thread->name = old_name;
2185 if (thread->core != core)
2187 /* We won't play thread tag - just forget it */
2188 UNLOCK_THREAD(thread, state);
2189 set_irq_level(oldlevel);
2190 goto thread_kill_abort;
2193 /* Perform the extraction and switch ourselves back to the original
2194 processor */
2196 #endif /* NUM_CORES > 1 */
2198 #ifdef HAVE_PRIORITY_SCHEDULING
2199 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2200 #endif
2201 if (thread->tmo.prev != NULL)
2203 /* Clean thread off the timeout list if a timeout check hasn't
2204 * run yet */
2205 remove_from_list_tmo(thread);
2208 boost_thread(thread, false);
2210 if (thread == cores[core].running)
2212 /* Suicide - thread has unconditional rights to do this */
2213 /* Maintain locks until switch-out */
2214 block_thread_on_l(NULL, thread, STATE_KILLED);
2216 #if NUM_CORES > 1
2217 /* Switch to the idle stack if not on the main core (where "main"
2218 * runs) */
2219 if (core != CPU)
2221 switch_to_idle_stack(core);
2224 flush_icache();
2225 #endif
2226 /* Signal this thread */
2227 thread_queue_wake_no_listlock(&thread->queue);
2228 /* Switch tasks and never return */
2229 switch_thread(thread);
2230 /* This should never and must never be reached - if it is, the
2231 * state is corrupted */
2232 THREAD_PANICF("remove_thread->K:*R", thread);
2235 #if NUM_CORES > 1
2236 if (thread->name == THREAD_DESTRUCT)
2238 /* Another core is doing this operation already */
2239 UNLOCK_THREAD(thread, state);
2240 set_irq_level(oldlevel);
2241 return;
2243 #endif
2244 if (cores[core].waking.queue != NULL)
2246 /* Get any threads off the waking list and onto the running
2247 * list first - waking and running cannot be distinguished by
2248 * state */
2249 core_perform_wakeup(IF_COP(core));
2252 switch (state)
2254 case STATE_RUNNING:
2255 /* Remove thread from ready to run tasks */
2256 remove_from_list_l(&cores[core].running, thread);
2257 break;
2258 case STATE_BLOCKED:
2259 case STATE_BLOCKED_W_TMO:
2260 /* Remove thread from the queue it's blocked on - including its
2261 * own if waiting there */
2262 #if CONFIG_CORELOCK == SW_CORELOCK
2263 /* One or the other will be valid */
2264 if (thread->bqp == NULL)
2266 remove_from_list_l(thread->bqnlp, thread);
2268 else
2269 #endif /* CONFIG_CORELOCK */
2271 remove_from_list_l_locked(thread->bqp, thread);
2273 break;
2274 /* Otherwise thread is killed or is frozen and hasn't run yet */
2277 /* If thread was waiting on itself, it will have been removed above.
2278 * The wrong order would result in waking the thread first and deadlocking
2279 * since the slot is already locked. */
2280 thread_queue_wake_no_listlock(&thread->queue);
2282 thread_killed: /* Thread was already killed */
2283 /* Removal complete - safe to unlock state and reenable interrupts */
2284 UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
2285 set_irq_level(oldlevel);
2287 #if NUM_CORES > 1
2288 thread_kill_abort: /* Something stopped us from killing the thread */
2289 if (old_core < NUM_CORES)
2291 /* Did a removal on another processor's thread - switch back to
2292 native core */
2293 switch_core(old_core);
2295 #endif
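/* Usage note (illustrative): a thread may terminate itself by passing NULL,
 * which is exactly what the coprocessor bootstrap in init_threads() below
 * does; the call then never returns:
 *
 *     remove_thread(NULL);
 */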
2298 /*---------------------------------------------------------------------------
2299 * Block the current thread until another thread terminates. A thread may
2300 * wait on itself to terminate which prevents it from running again and it
2301 * will need to be killed externally.
2302 * Parameter is the ID as returned from create_thread().
2303 *---------------------------------------------------------------------------
2305 void thread_wait(struct thread_entry *thread)
2307 const unsigned int core = CURRENT_CORE;
2308 struct thread_entry *current = cores[core].running;
2309 unsigned thread_state;
2310 #if NUM_CORES > 1
2311 int oldlevel;
2312 unsigned current_state;
2313 #endif
2315 if (thread == NULL)
2316 thread = current;
2318 #if NUM_CORES > 1
2319 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2320 #endif
2322 thread_state = GET_THREAD_STATE(thread);
2324 #if NUM_CORES > 1
2325 /* We can't lock the same slot twice. The waitee will also lock itself
2326        first, then the thread slots that will be locked and woken in turn.
2327 The same order must be observed here as well. */
2328 if (thread == current)
2330 current_state = thread_state;
2332 else
2334 current_state = GET_THREAD_STATE(current);
2336 #endif
2338 if (thread_state != STATE_KILLED)
2340 /* Unlock the waitee state at task switch - not done for self-wait
2341        because that would double-unlock the state and potentially
2342 corrupt another's busy assert on the slot */
2343 if (thread != current)
2345 #if CONFIG_CORELOCK == SW_CORELOCK
2346 cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
2347 cores[core].blk_ops.thread = thread;
2348 #elif CONFIG_CORELOCK == CORELOCK_SWAP
2349 cores[core].blk_ops.flags |= TBOP_SET_VARu8;
2350 cores[core].blk_ops.var_u8p = &thread->state;
2351 cores[core].blk_ops.var_u8v = thread_state;
2352 #endif
2354 block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
2355 switch_thread(current);
2356 return;
2359 /* Unlock both slots - obviously the current thread can't have
2360 STATE_KILLED so the above if clause will always catch a thread
2361 waiting on itself */
2362 #if NUM_CORES > 1
2363 UNLOCK_THREAD(current, current_state);
2364 UNLOCK_THREAD(thread, thread_state);
2365 set_irq_level(oldlevel);
2366 #endif
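/* Wait sketch (illustrative, "worker" hypothetical): joining another thread.
 * thread_wait() returns once the target has reached STATE_KILLED, so the
 * worker must eventually exit via remove_thread():
 *
 *     struct thread_entry *worker;   previously obtained from create_thread()
 *
 *     thread_wait(worker);           blocks until the worker has been removed
 */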
2369 #ifdef HAVE_PRIORITY_SCHEDULING
2370 /*---------------------------------------------------------------------------
2371 * Sets the thread's relative priority for the core it runs on.
2372 *---------------------------------------------------------------------------
2374 int thread_set_priority(struct thread_entry *thread, int priority)
2376 unsigned old_priority = (unsigned)-1;
2378 if (thread == NULL)
2379 thread = cores[CURRENT_CORE].running;
2381 #if NUM_CORES > 1
2382 /* Thread could be on any list and therefore on an interrupt accessible
2383 one - disable interrupts */
2384 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2385 #endif
2386 unsigned state = GET_THREAD_STATE(thread);
2388 /* Make sure it's not killed */
2389 if (state != STATE_KILLED)
2391 old_priority = thread->priority;
2392 thread->priority = priority;
2393 cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
2396 #if NUM_CORES > 1
2397 UNLOCK_THREAD(thread, state);
2398 set_irq_level(oldlevel);
2399 #endif
2400 return old_priority;
2403 /*---------------------------------------------------------------------------
2404 * Returns the current priority for a thread.
2405 *---------------------------------------------------------------------------
2407 int thread_get_priority(struct thread_entry *thread)
2409 /* Simple, quick probe. */
2410 if (thread == NULL)
2411 thread = cores[CURRENT_CORE].running;
2413 return (unsigned)thread->priority;
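/* Priority sketch (illustrative only, do_urgent_work() hypothetical):
 * temporarily raising the current thread's priority and restoring it with the
 * returned old value; NULL selects the calling thread:
 *
 *     int old = thread_set_priority(NULL, PRIORITY_USER_INTERFACE);
 *     do_urgent_work();
 *     thread_set_priority(NULL, old);     restore the previous priority
 */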
2416 /*---------------------------------------------------------------------------
2417 * Yield that guarantees thread execution once per round regardless of
2418 * thread's scheduler priority - basically a transient realtime boost
2419 * without altering the scheduler's thread precedence.
2421 * HACK ALERT! Search for "priority inheritance" for proper treatment.
2422 *---------------------------------------------------------------------------
2424 void priority_yield(void)
2426 const unsigned int core = CURRENT_CORE;
2427 struct thread_entry *thread = cores[core].running;
2428 thread->priority_x = HIGHEST_PRIORITY;
2429 switch_thread(NULL);
2430 thread->priority_x = LOWEST_PRIORITY;
2432 #endif /* HAVE_PRIORITY_SCHEDULING */
2434 /* Resumes a frozen thread - similar logic to wakeup_thread except that
2435 the thread is on no scheduler list at all. It exists simply by virtue of
2436 the slot having a state of STATE_FROZEN. */
2437 void thread_thaw(struct thread_entry *thread)
2439 #if NUM_CORES > 1
2440 /* Thread could be on any list and therefore on an interrupt accessible
2441 one - disable interrupts */
2442 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2443 #endif
2444 unsigned state = GET_THREAD_STATE(thread);
2446 if (state == STATE_FROZEN)
2448 const unsigned int core = CURRENT_CORE;
2449 #if NUM_CORES > 1
2450 if (thread->core != core)
2452 core_schedule_wakeup(thread);
2454 else
2455 #endif
2457 add_to_list_l(&cores[core].running, thread);
2460 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
2461 #if NUM_CORES > 1
2462 set_irq_level(oldlevel);
2463 #endif
2464 return;
2467 #if NUM_CORES > 1
2468 UNLOCK_THREAD(thread, state);
2469 set_irq_level(oldlevel);
2470 #endif
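/* Thaw sketch (illustrative, names hypothetical; COP is assumed to be the
 * usual coprocessor core identifier): a thread created with
 * CREATE_THREAD_FROZEN sits in STATE_FROZEN on no scheduler list until it is
 * explicitly released:
 *
 *     struct thread_entry *t =
 *         create_thread(codec_main, codec_stack, sizeof(codec_stack),
 *                       CREATE_THREAD_FROZEN, "codec"
 *                       IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, COP));
 *
 *     thread_thaw(t);     once shared state is set up, place it on a run list
 */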
2473 /*---------------------------------------------------------------------------
2474 * Return the ID of the currently executing thread.
2475 *---------------------------------------------------------------------------
2477 struct thread_entry * thread_get_current(void)
2479 return cores[CURRENT_CORE].running;
2482 #if NUM_CORES > 1
2483 /*---------------------------------------------------------------------------
2484 * Switch the processor that the currently executing thread runs on.
2485 *---------------------------------------------------------------------------
2487 unsigned int switch_core(unsigned int new_core)
2489 const unsigned int core = CURRENT_CORE;
2490 struct thread_entry *current = cores[core].running;
2491 struct thread_entry *w;
2492 int oldlevel;
2494 /* Interrupts can access the lists that will be used - disable them */
2495 unsigned state = GET_THREAD_STATE(current);
2497 if (core == new_core)
2499 /* No change - just unlock everything and return same core */
2500 UNLOCK_THREAD(current, state);
2501 return core;
2504 /* Get us off the running list for the current core */
2505 remove_from_list_l(&cores[core].running, current);
2507 /* Stash return value (old core) in a safe place */
2508 current->retval = core;
2510 /* If a timeout hadn't yet been cleaned-up it must be removed now or
2511 * the other core will likely attempt a removal from the wrong list! */
2512 if (current->tmo.prev != NULL)
2514 remove_from_list_tmo(current);
2517 /* Change the core number for this thread slot */
2518 current->core = new_core;
2520 /* Do not use core_schedule_wakeup here since this will result in
2521 * the thread starting to run on the other core before being finished on
2522 * this one. Delay the wakeup list unlock to keep the other core stuck
2523 * until this thread is ready. */
2524 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2525 w = LOCK_LIST(&cores[new_core].waking);
2526 ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);
2528 /* Make a callback into device-specific code, unlock the wakeup list so
2529 * that execution may resume on the new core, unlock our slot and finally
2530 * restore the interrupt level */
2531 cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
2532 TBOP_UNLOCK_LIST;
2533 cores[core].blk_ops.list_p = &cores[new_core].waking;
2534 #if CONFIG_CORELOCK == CORELOCK_SWAP
2535 cores[core].blk_ops.state = STATE_RUNNING;
2536 cores[core].blk_ops.list_v = w;
2537 #endif
2539 #ifdef HAVE_PRIORITY_SCHEDULING
2540 current->priority_x = HIGHEST_PRIORITY;
2541 cores[core].highest_priority = LOWEST_PRIORITY;
2542 #endif
2543     /* Do the stack switching, cache maintenance and switch_thread call -
2544 requires native code */
2545 switch_thread_core(core, current);
2547 #ifdef HAVE_PRIORITY_SCHEDULING
2548 current->priority_x = LOWEST_PRIORITY;
2549 cores[current->core].highest_priority = LOWEST_PRIORITY;
2550 #endif
2552 /* Finally return the old core to caller */
2553 return current->retval;
2554 (void)state;
2556 #endif /* NUM_CORES > 1 */
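/* Core-switch sketch (illustrative only, dual-core builds; CPU/COP assumed to
 * be the usual core identifiers): the return value is the core the thread ran
 * on before, so a round trip looks like:
 *
 *     unsigned int old_core = switch_core(COP);   continue running on COP
 *                                                 ...work that must run there
 *     switch_core(old_core);                      migrate back afterwards
 */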
2558 /*---------------------------------------------------------------------------
2559 * Initialize threading API. This assumes interrupts are not yet enabled. On
2560 * multicore setups, no core is allowed to proceed until create_thread calls
2561 * are safe to perform.
2562 *---------------------------------------------------------------------------
2564 void init_threads(void)
2566 const unsigned int core = CURRENT_CORE;
2567 struct thread_entry *thread;
2568 int slot;
2570 /* CPU will initialize first and then sleep */
2571 slot = find_empty_thread_slot();
2573 if (slot >= MAXTHREADS)
2575 /* WTF? There really must be a slot available at this stage.
2576          * This can fail if, for example, .bss isn't zeroed out by the loader
2577          * or the threads array is placed in the wrong section. */
2578 THREAD_PANICF("init_threads->no slot", NULL);
2581 /* Initialize initially non-zero members of core */
2582 thread_queue_init(&cores[core].waking);
2583 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2584 #ifdef HAVE_PRIORITY_SCHEDULING
2585 cores[core].highest_priority = LOWEST_PRIORITY;
2586 #endif
2588 /* Initialize initially non-zero members of slot */
2589 thread = &threads[slot];
2590 thread->name = main_thread_name;
2591 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); /* No sync worries yet */
2592 #if NUM_CORES > 1
2593 thread->core = core;
2594 #endif
2595 #ifdef HAVE_PRIORITY_SCHEDULING
2596 thread->priority = PRIORITY_USER_INTERFACE;
2597 thread->priority_x = LOWEST_PRIORITY;
2598 #endif
2599 #if CONFIG_CORELOCK == SW_CORELOCK
2600 corelock_init(&thread->cl);
2601 #endif
2603 add_to_list_l(&cores[core].running, thread);
2605 if (core == CPU)
2607 thread->stack = stackbegin;
2608 thread->stack_size = (int)stackend - (int)stackbegin;
2609 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
2610 /* TODO: HAL interface for this */
2611 /* Wake up coprocessor and let it initialize kernel and threads */
2612 #ifdef CPU_PP502x
2613 MBX_MSG_CLR = 0x3f;
2614 #endif
2615 COP_CTL = PROC_WAKE;
2616 /* Sleep until finished */
2617 CPU_CTL = PROC_SLEEP;
2618 nop; nop; nop; nop;
2620 else
2622 /* Initial stack is the COP idle stack */
2623 thread->stack = cop_idlestackbegin;
2624 thread->stack_size = IDLE_STACK_SIZE;
2625 /* Get COP safely primed inside switch_thread where it will remain
2626 * until a thread actually exists on it */
2627 CPU_CTL = PROC_WAKE;
2628 remove_thread(NULL);
2629 #endif /* NUM_CORES */
2633 /*---------------------------------------------------------------------------
2634 * Returns the maximum percentage of stack a thread ever used while running.
2635  * NOTE: Some large buffer allocations that don't use enough of the buffer to
2636 * overwrite stackptr[0] will not be seen.
2637 *---------------------------------------------------------------------------
2639 int thread_stack_usage(const struct thread_entry *thread)
2641 unsigned int *stackptr = thread->stack;
2642 int stack_words = thread->stack_size / sizeof (int);
2643 int i, usage = 0;
2645 for (i = 0; i < stack_words; i++)
2647 if (stackptr[i] != DEADBEEF)
2649 usage = ((stack_words - i) * 100) / stack_words;
2650 break;
2654 return usage;
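/* Usage sketch (illustrative only): debug screens can flag threads that are
 * close to exhausting their stack:
 *
 *     if (thread_stack_usage(thread) > 90)
 *         ;   report the thread - over 90% of its stack has been touched
 */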
2657 #if NUM_CORES > 1
2658 /*---------------------------------------------------------------------------
2659 * Returns the maximum percentage of the core's idle stack ever used during
2660 * runtime.
2661 *---------------------------------------------------------------------------
2663 int idle_stack_usage(unsigned int core)
2665 unsigned int *stackptr = idle_stacks[core];
2666 int i, usage = 0;
2668 for (i = 0; i < IDLE_STACK_WORDS; i++)
2670 if (stackptr[i] != DEADBEEF)
2672 usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
2673 break;
2677 return usage;
2679 #endif
2681 /*---------------------------------------------------------------------------
2682 * Fills in the buffer with the specified thread's name. If the name is NULL,
2683  * empty, or the thread is in destruct state, a formatted ID is written
2684 * instead.
2685 *---------------------------------------------------------------------------
2687 void thread_get_name(char *buffer, int size,
2688 struct thread_entry *thread)
2690 if (size <= 0)
2691 return;
2693 *buffer = '\0';
2695 if (thread)
2697 /* Display thread name if one or ID if none */
2698 const char *name = thread->name;
2699 const char *fmt = "%s";
2700 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
2702 name = (const char *)thread;
2703 fmt = "%08lX";
2705 snprintf(buffer, size, fmt, name);
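/* Naming sketch (illustrative only, "line" is a hypothetical output buffer):
 * formatting a line for a debug display; the function falls back to a
 * "%08lX" ID when the thread has no usable name:
 *
 *     char name[32];
 *     thread_get_name(name, sizeof(name), thread);
 *     snprintf(line, sizeof(line), "%s: %d%%",
 *              name, thread_stack_usage(thread));
 */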