1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Ulf Ralberg
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
/* File-scope scheduler state and stack symbols.
 * NOTE(review): this capture is a garbled numbered listing — the leading
 * number on each line is the original file's line number, and gaps in that
 * numbering (e.g. 37, 40-41, 45-46) mean lines such as the matching #endif
 * directives are missing from this view. */
31 #define DEADBEEF ((unsigned int)0xdeadbeef)
32 /* Stack-fill pattern, cast to the machine int type, whose size could be < 4. */
/* Per-core scheduler bookkeeping, placed in IRAM via IBSS_ATTR. */
34 struct core_entry cores
[NUM_CORES
] IBSS_ATTR
;
35 #ifdef HAVE_PRIORITY_SCHEDULING
/* Lowest numeric value = highest priority; reset to 100 elsewhere in file. */
36 static unsigned short highest_priority IBSS_ATTR
;
38 #ifdef HAVE_SCHEDULER_BOOSTCTRL
/* Count of threads currently holding a CPU boost (see trigger_cpu_boost). */
39 static int boosted_threads IBSS_ATTR
;
42 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Sentinel: no IRQ-level change requested at the next task switch. */
43 #define STAY_IRQ_LEVEL -1
44 static int switch_to_irq_level
= STAY_IRQ_LEVEL
;
47 /* Define to enable additional checks for blocking violations etc. */
48 #define THREAD_EXTRA_CHECKS
50 static const char main_thread_name
[] = "main";
/* Main stack bounds, provided by the linker script. */
52 extern int stackbegin
[];
53 extern int stackend
[];
57 extern int cop_stackbegin
[];
58 extern int cop_stackend
[];
60 /* The coprocessor stack is not set up in the bootloader code, but the threading
61 * is. No threads are run on the coprocessor, so set up some dummy stack */
62 int *cop_stackbegin
= stackbegin
;
63 int *cop_stackend
= stackend
;
/* Forward declarations for the list helpers and the context primitives.
 * NOTE(review): garbled listing — the attribute/semicolon that should
 * terminate the switch_thread prototype (orig. 75-76) is missing here. */
68 static void add_to_list(struct thread_entry **list,
69 struct thread_entry *thread) ICODE_ATTR;
70 static void remove_from_list(struct thread_entry **list,
71 struct thread_entry *thread) ICODE_ATTR;
74 void switch_thread(bool save_context
, struct thread_entry
**blocked_list
)
/* store/load_context must always be inlined so the saved register state
 * belongs to the caller's own frame. */
77 static inline void store_context(void* addr
) __attribute__ ((always_inline
));
78 static inline void load_context(const void* addr
)
79 __attribute__ ((always_inline
));
/* CPU-specific register save/restore used by switch_thread.
 * NOTE(review): garbled listing — the `#if CONFIG_CPU == ...` selector for
 * the first (ARM) variant, the `asm volatile (` openers, closing `);` and
 * function braces, and the whole SH7034 store_context body (orig. 150-165)
 * are missing from this capture (numbering gaps 85, 87-88, 90-93, 97,
 * 99-100, 103, 108-110, 115, 117-118, 122-124, 128, 130-131, 137, 139,
 * 141-143, 148, 150-165, 169, 171-181, 183, 185-186, 189, 191-195). */
82 /*---------------------------------------------------------------------------
83 * Store non-volatile context.
84 *---------------------------------------------------------------------------
86 static inline void store_context(void* addr
)
89 "stmia %0, { r4-r11, sp, lr }\n"
94 /*---------------------------------------------------------------------------
95 * Load non-volatile context.
96 *---------------------------------------------------------------------------
98 static inline void load_context(const void* addr
)
/* Restore callee-saved regs; a non-NULL saved start pointer means the
 * thread has never run, so jump to its entry point. r1's value is set on a
 * line (orig. 103) missing from this capture — presumably `mov r1, #0`. */
101 "ldmia %0, { r4-r11, sp, lr }\n" /* load regs r4 to r14 from context */
102 "ldr r0, [%0, #40] \n" /* load start pointer */
104 "cmp r0, r1 \n" /* check for NULL */
105 "strne r1, [%0, #40] \n" /* if it's NULL, we're already running */
106 "movne pc, r0 \n" /* not already running, so jump to start */
107 : : "r" (addr
) : "r0", "r1"
111 #elif defined(CPU_COLDFIRE)
112 /*---------------------------------------------------------------------------
113 * Store non-volatile context.
114 *---------------------------------------------------------------------------
116 static inline void store_context(void* addr
)
119 "move.l %%macsr,%%d0 \n"
120 "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
121 : : "a" (addr
) : "d0" /* only! */
125 /*---------------------------------------------------------------------------
126 * Load non-volatile context.
127 *---------------------------------------------------------------------------
129 static inline void load_context(const void* addr
)
132 "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
133 "move.l %%d0,%%macsr \n"
134 "move.l (52,%0),%%d0 \n" /* Get start address */
135 "beq.b .running \n" /* NULL -> already running */
136 "clr.l (52,%0) \n" /* Clear start address.. */
138 "jmp (%0) \n" /* ..and start the thread */
140 : : "a" (addr
) : "d0" /* only! */
144 #elif CONFIG_CPU == SH7034
145 /*---------------------------------------------------------------------------
146 * Store non-volatile context.
147 *---------------------------------------------------------------------------
149 static inline void store_context(void* addr
)
166 /*---------------------------------------------------------------------------
167 * Load non-volatile context.
168 *---------------------------------------------------------------------------
170 static inline void load_context(const void* addr
)
/* mov.l in the rts delay slot clears the start address after launch. */
182 "mov.l @%0,r0 \n" /* Get start address */
184 "bt .running \n" /* NULL -> already running */
187 "rts \n" /* Start the thread */
188 "mov.l r0,@%0 \n" /* Clear start address */
190 : : "r" (addr
) : "r0" /* only! */
/* Link `thread` into the circular doubly-linked list headed by *list.
 * NOTE(review): garbled listing — the if/else scaffolding and braces
 * (orig. 197-199, 202-206, 211-212, 217-219) are missing from this view.
 * Lines 200-201 are the empty-list case (thread links to itself);
 * 207-210 insert the thread just before the head, i.e. at the tail. */
196 static void add_to_list(struct thread_entry
**list
, struct thread_entry
*thread
)
200 thread
->next
= thread
;
201 thread
->prev
= thread
;
207 thread
->next
= *list
;
208 thread
->prev
= (*list
)->prev
;
209 thread
->prev
->next
= thread
;
210 (*list
)->prev
= thread
;
/* NOTE(review): 213-216 insert *after* the head instead — orig. 212
 * (missing here) presumably opened a comment keeping this variant
 * disabled; confirm against the upstream file. */
213 thread->next = (*list)->next;
214 thread->prev = *list;
215 thread->next->prev = thread;
216 (*list)->next = thread;
/* Unlink `thread` from the circular doubly-linked list headed by *list.
 * NOTE(review): garbled listing — orig. 223-225, 227-232, 234-235 and
 * 239 are missing; they presumably held the braces, the sole-element
 * handling under the `thread == thread->next` test, and the guard that
 * advances *list when the head itself is removed. */
221 static void remove_from_list(struct thread_entry
**list
,
222 struct thread_entry
*thread
)
/* Thread pointing at itself means it is the only element. */
226 if (thread
== thread
->next
)
/* Advance the head past the node being removed. */
233 *list
= thread
->next
;
236 /* Fix links to jump over the removed entry. */
237 thread
->prev
->next
= thread
->next
;
238 thread
->next
->prev
= thread
->prev
;
/* Walk the current core's sleeping list and move any thread whose sleep
 * timeout (stored in statearg) has expired back onto the running list.
 * NOTE(review): garbled listing — the loop header and opening brace
 * (orig. 249-253), the break/advance statements (266-273, 277-279) and
 * the closing brace are missing from this capture. */
241 /* Compiler trick: Don't declare as static to prevent putting
242 * function in IRAM. */
243 void check_sleepers(void)
245 struct thread_entry
*current
, *next
;
247 /* Check sleeping threads. */
248 current
= cores
[CURRENT_CORE
].sleeping
;
/* Save the successor before current is potentially relinked. */
254 next
= current
->next
;
256 if ((unsigned)current_tick
>= GET_STATE_ARG(current
->statearg
))
258 /* Sleep timeout has been reached so bring the thread
259 * back to life again. */
260 remove_from_list(&cores
[CURRENT_CORE
].sleeping
, current
);
261 add_to_list(&cores
[CURRENT_CORE
].running
, current
);
262 current
->statearg
= 0;
264 /* If there are no more processes in the list, break the loop. */
265 if (cores
[CURRENT_CORE
].sleeping
== NULL
)
274 /* Break the loop once we have walked through the list of all
275 * sleeping processes. */
276 if (current
== cores
[CURRENT_CORE
].sleeping
)
/* Idle the core until at least one thread is runnable, waking sleepers
 * once per tick. NOTE(review): garbled listing — the enclosing loop,
 * braces, the check_sleepers() call, and several #if/#endif lines are
 * missing (orig. 282, 285-289, 291-292, 294-295, 299-300, 302, 305,
 * 309, 315-318); line 303's `stop #0x2000` is presumably under a
 * ColdFire conditional (orig. 302) not visible here — confirm. */
281 static inline void sleep_core(void)
/* Remember the last tick on which sleepers were checked. */
283 static long last_tick
= 0;
284 #if CONFIG_CPU == S3C2440
/* Only scan the sleeping list once per tick. */
290 if (last_tick
!= current_tick
)
293 last_tick
= current_tick
;
296 /* We must sleep until there is at least one process in the list
297 * of running processes. */
298 if (cores
[CURRENT_CORE
].running
!= NULL
)
301 /* Enter sleep mode to reduce power usage, woken up on interrupt */
303 asm volatile ("stop #0x2000");
304 #elif CONFIG_CPU == SH7034
306 asm volatile ("sleep");
307 #elif CONFIG_CPU == PP5020
308 /* This should sleep the CPU. It appears to wake by itself on
310 CPU_CTL
= 0x80000000;
311 #elif CONFIG_CPU == S3C2440
312 CLKCON
|= (1 << 2); /* set IDLE bit */
313 for(i
=0; i
<10; i
++); /* wait for IDLE */
314 CLKCON
&= ~(1 << 2); /* reset IDLE bit when wake up */
/* Return the index of `thread` within the current core's thread table.
 * NOTE(review): garbled listing — the braces, the declaration of `i`
 * (orig. 321-323) and both return statements (orig. 327-331, presumably
 * `return i;` on match and a not-found sentinel) are missing here. */
320 static int get_threadnum(struct thread_entry
*thread
)
324 for (i
= 0; i
< MAXTHREADS
; i
++)
/* Identity comparison against the table slot's address. */
326 if (&cores
[CURRENT_CORE
].threads
[i
] == thread
)
/* Start profiling the currently running thread (profiling builds only).
 * NOTE(review): closing brace (orig. 335) missing from this capture. */
333 void profile_thread(void) {
334 profstart(get_threadnum(cores
[CURRENT_CORE
].running
));
/* Move the current thread off the running list according to its requested
 * state: STATE_BLOCKED goes onto `blocked_list`, otherwise onto the
 * core's sleeping list; then advance `running` to the next thread.
 * NOTE(review): garbled listing — the `if (new_state ...)` guard around
 * the relinking (orig. 350-351), the `else` (orig. 358), #endif
 * (orig. 365) and the braces are missing from this capture. */
338 /* Compiler trick: Don't declare as static to prevent putting
339 * function in IRAM. */
340 void change_thread_state(struct thread_entry
**blocked_list
)
342 struct thread_entry
*old
;
343 unsigned long new_state
;
345 /* Remove the thread from the list of running threads. */
346 old
= cores
[CURRENT_CORE
].running
;
347 new_state
= GET_STATE(old
->statearg
);
349 /* Check if a thread state change has been requested. */
352 /* Change running thread state and switch to next thread. */
353 remove_from_list(&cores
[CURRENT_CORE
].running
, old
);
355 /* And put the thread into a new list of inactive threads. */
356 if (new_state
== STATE_BLOCKED
)
357 add_to_list(blocked_list
, old
);
359 add_to_list(&cores
[CURRENT_CORE
].sleeping
, old
);
361 #ifdef HAVE_PRIORITY_SCHEDULING
362 /* Reset priorities */
363 if (old
->priority
== highest_priority
)
364 highest_priority
= 100;
368 /* Switch to the next running thread. */
369 cores
[CURRENT_CORE
].running
= old
->next
;
/* Core scheduler entry point: optionally save the current context, detect
 * stack overflow, relink thread lists, honor a deferred IRQ-level change,
 * pick the next runnable thread (priority-aware when enabled) and load its
 * context. NOTE(review): garbled listing — the braces, the #ifdef guards
 * around profiling and save_context, the sleep/check_sleepers loop
 * (orig. 414-419), the priority-aging threshold completing the condition
 * at orig. 431, and several closing lines (orig. 432-434, 436-437,
 * 440-442, 445-449) are missing from this capture. */
372 /*---------------------------------------------------------------------------
373 * Switch thread in round robin fashion.
374 *---------------------------------------------------------------------------
376 void switch_thread(bool save_context
, struct thread_entry
**blocked_list
)
379 profile_thread_stopped(get_threadnum(cores
[CURRENT_CORE
].running
));
381 unsigned int *stackptr
;
387 /* Begin task switching by saving our current context so that we can
388 * restore the state of the current thread later to the point prior
392 store_context(&cores
[CURRENT_CORE
].running
->context
);
394 /* Check if the current thread stack is overflown */
395 stackptr
= cores
[CURRENT_CORE
].running
->stack
;
/* First stack word no longer DEADBEEF => the thread overran its stack. */
396 if(stackptr
[0] != DEADBEEF
)
397 panicf("Stkov %s", cores
[CURRENT_CORE
].running
->name
);
399 /* Rearrange thread lists as needed */
400 change_thread_state(blocked_list
);
402 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
403 /* This has to be done after the scheduler is finished with the
404 blocked_list pointer so that an IRQ can't kill us by attempting
405 a wake but before attempting any core sleep. */
406 if (switch_to_irq_level
!= STAY_IRQ_LEVEL
)
408 int level
= switch_to_irq_level
;
409 switch_to_irq_level
= STAY_IRQ_LEVEL
;
410 set_irq_level(level
);
415 /* Go through the list of sleeping task to check if we need to wake up
416 * any of them due to timeout. Also puts core into sleep state until
417 * there is at least one running process again. */
420 #ifdef HAVE_PRIORITY_SCHEDULING
421 /* Select the new task based on priorities and the last time a process
425 int priority
= cores
[CURRENT_CORE
].running
->priority
;
/* Track the best (numerically lowest) priority seen this pass. */
427 if (priority
< highest_priority
)
428 highest_priority
= priority
;
/* Run the thread if it has top priority, or age it in if starved. */
430 if (priority
== highest_priority
||
431 (current_tick
- cores
[CURRENT_CORE
].running
->last_run
>
435 cores
[CURRENT_CORE
].running
= cores
[CURRENT_CORE
].running
->next
;
438 /* Reset the value of thread's last running time to the current time. */
439 cores
[CURRENT_CORE
].running
->last_run
= current_tick
;
443 /* And finally give control to the next thread. */
444 load_context(&cores
[CURRENT_CORE
].running
->context
);
447 profile_thread_started(get_threadnum(cores
[CURRENT_CORE
].running
));
/* Put the current thread to sleep for at least `ticks` ticks (+1 so the
 * sleep is never shorter than requested), dropping any CPU boost first.
 * NOTE(review): garbled listing — the braces, the boost-release body
 * under `if (!boosted_threads)` (orig. 459-465, presumably unboosting and
 * decrementing), and #endif are missing from this capture. */
451 void sleep_thread(int ticks
)
453 struct thread_entry
*current
;
455 current
= cores
[CURRENT_CORE
].running
;
457 #ifdef HAVE_SCHEDULER_BOOSTCTRL
/* A sleeping thread has no business keeping the CPU boosted. */
458 if (STATE_IS_BOOSTED(current
->statearg
)) {
460 if (!boosted_threads
)
467 /* Set the thread's new state and timeout and finally force a task switch
468 * so that scheduler removes thread from the list of running processes
469 * and puts it in list of sleeping tasks. */
470 SET_STATE(current
->statearg
, STATE_SLEEPING
, current_tick
+ ticks
+ 1);
471 switch_thread(true, NULL
);
/* Block the current thread indefinitely on `list` until another thread
 * wakes it via wakeup_thread(). Preserves the boost flag across the block.
 * NOTE(review): garbled listing — braces, #endif lines and the #else
 * separating the two statearg-restore variants at orig. 501/504 are
 * missing from this capture. */
474 void block_thread(struct thread_entry
**list
)
476 struct thread_entry
*current
;
477 /* Get the entry for the current running thread. */
478 current
= cores
[CURRENT_CORE
].running
;
480 #ifdef HAVE_SCHEDULER_BOOSTCTRL
481 /* Keep the boosted state over indefinite block calls, because
482 * we are waiting until the earliest time that someone else
483 * completes an action */
484 unsigned long boost_flag
= STATE_IS_BOOSTED(current
->statearg
);
487 #ifdef THREAD_EXTRA_CHECKS
488 /* We are not allowed to mix blocking types in one queue. */
489 if (*list
&& GET_STATE((*list
)->statearg
) == STATE_BLOCKED_W_TMO
)
490 panicf("Blocking violation B->*T");
493 /* Set the state to blocked and ask the scheduler to switch tasks,
494 * this takes us off of the run queue until we are explicitly woken */
495 SET_STATE(current
->statearg
, STATE_BLOCKED
, 0);
497 switch_thread(true, list
);
499 #ifdef HAVE_SCHEDULER_BOOSTCTRL
500 /* Reset only the boosted flag to indicate we are up and running again. */
501 current
->statearg
= boost_flag
;
503 /* Clear all flags to indicate we are up and running again. */
504 current
->statearg
= 0;
/* Block the current thread on `list` until woken or `timeout` ticks pass.
 * Unlike block_thread(), any CPU boost is released: a timed block is a
 * sleep and must not keep the CPU boosted.
 * NOTE(review): garbled listing — braces, the boost-release body
 * (orig. 519-525), the extra-checks condition feeding the panicf at
 * orig. 531 (orig. 530), the `*list = current;` store implied by
 * orig. 537-538, and the `*list = NULL;` implied by orig. 544-545 are
 * missing from this capture. */
508 void block_thread_w_tmo(struct thread_entry
**list
, int timeout
)
510 struct thread_entry
*current
;
511 /* Get the entry for the current running thread. */
512 current
= cores
[CURRENT_CORE
].running
;
514 #ifdef HAVE_SCHEDULER_BOOSTCTRL
515 /* A block with a timeout is a sleep situation, whatever we are waiting
516 * for _may or may not_ happen, regardless of boost state, (user input
517 * for instance), so this thread no longer needs to boost */
518 if (STATE_IS_BOOSTED(current
->statearg
)) {
520 if (!boosted_threads
)
527 #ifdef THREAD_EXTRA_CHECKS
528 /* We can store only one thread to the "list" if thread is used
529 * in other list (such as core's list for sleeping tasks). */
531 panicf("Blocking violation T->*B");
534 /* Set the state to blocked with the specified timeout */
535 SET_STATE(current
->statearg
, STATE_BLOCKED_W_TMO
, current_tick
+ timeout
);
537 /* Set the "list" for explicit wakeup */
540 /* Now force a task switch and block until we have been woken up
541 * by another thread or timeout is reached. */
542 switch_thread(true, NULL
);
544 /* It is now safe for another thread to block on this "list" */
/* Atomically request an IRQ-level change for the upcoming task switch and
 * then block; switch_thread applies `switch_to_irq_level` after the
 * scheduler is done with the blocked list (see orig. 403-410).
 * NOTE(review): garbled listing — braces and the `block_thread(list);`
 * call that should follow at orig. 552 are missing from this capture. */
548 #if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) && !defined(SIMULATOR)
549 void set_irq_level_and_block_thread(struct thread_entry
**list
, int level
)
551 switch_to_irq_level
= level
;
/* Timed variant: same deferred IRQ-level handoff, then a timed block. */
556 void set_irq_level_and_block_thread_w_tmo(struct thread_entry
**list
,
557 int timeout
, int level
)
559 switch_to_irq_level
= level
;
560 block_thread_w_tmo(list
, timeout
);
563 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
/* Wake one thread blocked on `list`: a STATE_BLOCKED thread is moved to
 * the running list; a STATE_BLOCKED_W_TMO thread just has its timeout
 * cleared so the scheduler wakes it on its next pass.
 * NOTE(review): garbled listing — the NULL check (orig. 570-571), the
 * `thread = *list;`-style fetch (orig. 574-575), the `case STATE_BLOCKED:`
 * label (orig. 579), the break statements and the default label
 * (orig. 584, 589-591, 594-596) are missing from this capture. */
565 void wakeup_thread(struct thread_entry
**list
)
567 struct thread_entry
*thread
;
569 /* Check if there is a blocked thread at all. */
573 /* Wake up the last thread first. */
576 /* Determine thread's current state. */
577 switch (GET_STATE(thread
->statearg
))
580 /* Remove thread from the list of blocked threads and add it
581 * to the scheduler's list of running processes. */
582 remove_from_list(list
, thread
);
583 add_to_list(&cores
[CURRENT_CORE
].running
, thread
);
585 case STATE_BLOCKED_W_TMO
:
586 /* Just remove the timeout to cause scheduler to immediately
587 * wake up the thread. */
588 thread
->statearg
= 0;
592 /* Nothing to do. Thread has already been woken up
593 * or its state is not blocked or blocked with timeout. */
/* Convenience wrapper: create a thread on the core this code is running
 * on. NOTE(review): garbled listing — the function's return-type line
 * (orig. 603) and braces are missing from this capture. */
598 /*---------------------------------------------------------------------------
599 * Create thread on the current core.
600 * Return ID if context area could be allocated, else -1.
601 *---------------------------------------------------------------------------
604 create_thread(void (*function
)(void), void* stack
, int stack_size
,
605 const char *name
IF_PRIO(, int priority
))
607 return create_thread_on_core(CURRENT_CORE
, function
, stack
, stack_size
,
608 name
IF_PRIO(, priority
))
/* Allocate a slot in `core`'s thread table, fill the stack with DEADBEEF
 * for overflow detection, record the thread's metadata, put it on the
 * run list and prime its context so load_context will jump to `function`.
 * NOTE(review): garbled listing — the return-type line (orig. 616),
 * declarations of `n`, `i` and `regs` (orig. 620-625 partially), the
 * no-free-slot bailout (orig. 631-636), the `stackptr = stack;`
 * assignment (orig. 640), the name assignment (orig. 648), #endif
 * (orig. 655) and the final return (orig. 662-664) are missing here. */
611 /*---------------------------------------------------------------------------
612 * Create thread on a specific core.
613 * Return ID if context area could be allocated, else -1.
614 *---------------------------------------------------------------------------
617 create_thread_on_core(unsigned int core
, void (*function
)(void),
618 void* stack
, int stack_size
,
619 const char *name
IF_PRIO(, int priority
))
622 unsigned int stacklen
;
623 unsigned int *stackptr
;
626 struct thread_entry
*thread
;
/* Find a free slot: a NULL name marks an unused table entry. */
628 for (n
= 0; n
< MAXTHREADS
; n
++)
630 if (cores
[core
].threads
[n
].name
== NULL
)
638 /* Munge the stack to make it easy to spot stack overflows */
639 stacklen
= stack_size
/ sizeof(int);
641 for(i
= 0;i
< stacklen
;i
++)
643 stackptr
[i
] = DEADBEEF
;
646 /* Store interesting information */
647 thread
= &cores
[core
].threads
[n
];
649 thread
->stack
= stack
;
650 thread
->stack_size
= stack_size
;
651 thread
->statearg
= 0;
652 #ifdef HAVE_PRIORITY_SCHEDULING
653 thread
->priority
= priority
;
/* Force a full priority rescan on the next switch. */
654 highest_priority
= 100;
656 add_to_list(&cores
[core
].running
, thread
);
658 regs
= &thread
->context
;
659 /* Align stack to an even 32 bit boundary */
660 regs
->sp
= (void*)(((unsigned int)stack
+ stack_size
) & ~3);
/* Non-NULL start address tells load_context to jump here on first run. */
661 regs
->start
= (void*)function
;
/* Mark the current thread as boosted; the first booster presumably raises
 * the CPU clock. NOTE(review): garbled listing — braces, the body under
 * `if (!boosted_threads)` (orig. 673-675), the boosted_threads increment
 * (orig. 676-678) and #endif are missing from this capture — confirm
 * against upstream. */
666 #ifdef HAVE_SCHEDULER_BOOSTCTRL
667 void trigger_cpu_boost(void)
/* Only act once per thread: skip if already flagged as boosted. */
669 if (!STATE_IS_BOOSTED(cores
[CURRENT_CORE
].running
->statearg
))
671 SET_BOOST_STATE(cores
[CURRENT_CORE
].running
->statearg
);
672 if (!boosted_threads
)
/* Remove `thread` from the scheduler; removing the running thread also
 * forces an immediate switch (without saving its context).
 * NOTE(review): garbled listing — the `if (thread == NULL)` guard before
 * the self-removal default (orig. 687-688), the name-clearing store
 * implied by orig. 691-692, #endif (orig. 695), braces, the `else` before
 * orig. 704, and the trailing lines (orig. 701-703, 706, 708-709) are
 * missing from this capture. */
681 /*---------------------------------------------------------------------------
682 * Remove a thread on the current core from the scheduler.
683 * Parameter is the ID as returned from create_thread().
684 *---------------------------------------------------------------------------
686 void remove_thread(struct thread_entry
*thread
)
/* Presumably reached when thread is NULL: remove the current thread. */
689 thread
= cores
[CURRENT_CORE
].running
;
691 /* Free the entry by removing thread name. */
693 #ifdef HAVE_PRIORITY_SCHEDULING
694 highest_priority
= 100;
/* Removing ourselves: unlink and switch away without saving context. */
697 if (thread
== cores
[CURRENT_CORE
].running
)
699 remove_from_list(&cores
[CURRENT_CORE
].running
, thread
);
700 switch_thread(false, NULL
);
704 if (thread
== cores
[CURRENT_CORE
].sleeping
)
705 remove_from_list(&cores
[CURRENT_CORE
].sleeping
, thread
);
707 remove_from_list(NULL
, thread
);
/* Set a thread's priority (NULL presumably means the current thread) and
 * force a priority rescan; returns the old priority.
 * NOTE(review): garbled listing — the declaration of old_priority
 * (orig. 712-713), the `if (thread == NULL)` guard before orig. 716
 * (orig. 714-715), the `return old_priority;` (orig. 721-723) and braces
 * are missing from this capture. */
710 #ifdef HAVE_PRIORITY_SCHEDULING
711 int thread_set_priority(struct thread_entry
*thread
, int priority
)
716 thread
= cores
[CURRENT_CORE
].running
;
718 old_priority
= thread
->priority
;
719 thread
->priority
= priority
;
/* Invalidate the cached best priority so the scheduler rescans. */
720 highest_priority
= 100;
/* Return a thread's priority.
 * NOTE(review): garbled listing — braces and the `if (thread == NULL)`
 * guard (orig. 726-727) that should make line 728 conditional are
 * missing from this capture; as shown, the assignment looks
 * unconditional — confirm against upstream. */
725 int thread_get_priority(struct thread_entry
*thread
)
728 thread
= cores
[CURRENT_CORE
].running
;
730 return thread
->priority
;
/* Initialize the scheduler: zero the core table, install the already
 * running "main" thread as slot 0 of this core, and record its stack
 * bounds (the COP gets the dummy stack set up at file scope).
 * NOTE(review): garbled listing — #endif lines (orig. 746, 749),
 * `boosted_threads = 0;` implied by orig. 747-748, the core==CPU/else
 * scaffolding around the stack assignments (orig. 754-755, 758,
 * 763-764), and the closing brace are missing from this capture. */
734 void init_threads(void)
736 unsigned int core
= CURRENT_CORE
;
738 memset(cores
, 0, sizeof cores
);
739 cores
[core
].sleeping
= NULL
;
740 cores
[core
].running
= NULL
;
741 cores
[core
].threads
[0].name
= main_thread_name
;
742 cores
[core
].threads
[0].statearg
= 0;
743 #ifdef HAVE_PRIORITY_SCHEDULING
744 cores
[core
].threads
[0].priority
= PRIORITY_USER_INTERFACE
;
745 highest_priority
= 100;
747 #ifdef HAVE_SCHEDULER_BOOSTCTRL
750 add_to_list(&cores
[core
].running
, &cores
[core
].threads
[0]);
752 /* In multiple core setups, each core has a different stack. There is
753 * probably a much better way to do this. */
756 cores
[CPU
].threads
[0].stack
= stackbegin
;
757 cores
[CPU
].threads
[0].stack_size
= (int)stackend
- (int)stackbegin
;
759 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
760 cores
[COP
].threads
[0].stack
= cop_stackbegin
;
761 cores
[COP
].threads
[0].stack_size
=
762 (int)cop_stackend
- (int)cop_stackbegin
;
/* Start address 0 tells load_context this thread is already running. */
765 cores
[core
].threads
[0].context
.start
= 0; /* thread 0 already running */
/* Estimate a thread's stack usage as a percentage by scanning from the
 * bottom for the first word no longer holding the DEADBEEF fill pattern.
 * NOTE(review): garbled listing — braces, the declaration of `i`
 * (orig. 769-770), the break on first non-DEADBEEF word (orig. 776-777)
 * and the divisor completing the return expression at orig. 779
 * (orig. 780, presumably thread->stack_size) are missing here. */
768 int thread_stack_usage(const struct thread_entry
*thread
)
771 unsigned int *stackptr
= thread
->stack
;
773 for (i
= 0;i
< thread
->stack_size
/sizeof(int);i
++)
775 if (stackptr
[i
] != DEADBEEF
)
779 return ((thread
->stack_size
- i
* sizeof(int)) * 100) /
/* Return the thread's scheduler state (sleep/blocked bits) decoded from
 * statearg. NOTE(review): the function braces (orig. 784, 786) are
 * missing from this capture. */
783 int thread_get_status(const struct thread_entry
*thread
)
785 return GET_STATE(thread
->statearg
)