/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
struct core_entry cores[NUM_CORES] IBSS_ATTR;
#ifdef HAVE_PRIORITY_SCHEDULING
static unsigned short highest_priority IBSS_ATTR;
#endif
/* Define to enable additional checks for blocking violations etc. */
// #define THREAD_EXTRA_CHECKS
static const char main_thread_name[] = "main";
extern int stackbegin[];
extern int stackend[];
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the
   threading is. No threads are run on the coprocessor, so set up some dummy
   stacks for it. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;
static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr) __attribute__ ((always_inline));
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr }\n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "mov    r1, #0                 \n"
        "cmp    r0, r1                 \n" /* check for NULL */
        "strne  r1, [%0, #40]          \n" /* if not already running, clear the start pointer.. */
        "movne  pc, r0                 \n" /* ..and jump to the thread's start address */
        : : "r" (addr) : "r0", "r1"
    );
}
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   .running                      \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    ".running:                                 \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
        "mov.l   @%0,r0   \n" /* Get start address */
        "bt      .running \n" /* NULL -> already running */
        "rts              \n" /* Start the thread */
        "mov.l   r0,@%0   \n" /* Clear start address */
        : : "r" (addr) : "r0" /* only! */
#elif CONFIG_CPU == TCC730
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
#define store_context(addr) \
        "ldw @[%0+0], a15\n\t" : : "a" (addr) );
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
#define load_context(addr) \
    if (!(addr)->started) { \
        (addr)->started = 1; \
        "ldw a15, @[%0+0]\n\t" \
        "ldw a14, @[%0+4]\n\t" \
        "jmp a14\n\t" : : "a" (addr) \
        "ldw a15, @[%0+0]\n\t" \
        "pop r1,r0\n\t" : : "a" (addr) \
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread)
{
    if (*list == NULL)
    {
        /* Insert into an empty list: the thread points back at itself. */
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
        */
    }
}
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            /* The thread is the only entry: the list becomes empty. */
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
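/* Illustration (not from the original sources): with threads A, B and C on a
   circular list where *list == A, the links are

       A->next == B    B->next == C    C->next == A
       A->prev == C    B->prev == A    C->prev == B

   add_to_list() inserts a new entry "last", i.e. between C and A, and
   remove_from_list() splices an entry out by pointing its two neighbours at
   each other, emptying the list when the entry was its only member. */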
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    if (current == NULL)
        return ;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}
static inline void sleep_core(void)
{
    static long last_tick = 0;

    for (;;)
    {
        /* Wake any sleeping threads whose timeout has expired, but only
         * once per tick. */
        if (last_tick != current_tick)
        {
            check_sleepers();
            last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
#if defined(CPU_COLDFIRE)
        asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
        asm volatile ("sleep");
#elif CONFIG_CPU == PP5020
        /* This should sleep the CPU. It appears to wake by itself on
           interrupts. */
        CPU_CTL = 0x80000000;
#elif CONFIG_CPU == TCC730
        /* Sleep mode is triggered by the SYS instr on CalmRisc16.
         * Unfortunately, the manual doesn't specify which arg to use:
         *   __asm__ volatile ("sys #0x0f");
         * 0x1f seems to trigger a reset; 0x0f is the only other argument
         * used by Archos. */
#elif CONFIG_CPU == S3C2440
#endif
    }
}
#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&cores[CURRENT_CORE].threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    remove_from_list(&cores[CURRENT_CORE].running, old);

    /* And put the thread into a new list of inactive threads. */
    if (GET_STATE(old->statearg) == STATE_BLOCKED)
        add_to_list(blocked_list, old);
    else
        add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Reset priorities */
    if (old->priority == highest_priority)
        highest_priority = 100;
#endif
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
#endif
    unsigned int *stackptr;

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

# if CONFIG_CPU != TCC730
        /* Check if the current thread stack is overflown */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
            panicf("Stkov %s", cores[CURRENT_CORE].running->name);
# endif

        /* Check if a thread state change has been requested. */
        if (cores[CURRENT_CORE].running->statearg)
        {
            /* Change running thread state and switch to next thread. */
            change_thread_state(blocked_list);
        }
        else
        {
            /* Switch to the next running thread. */
            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < highest_priority)
            highest_priority = priority;

        if (priority == highest_priority || (current_tick
            - cores[CURRENT_CORE].running->last_run > priority * 8))
        {
            break;
        }

        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
#endif

    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
#endif
}
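/* Note on the priority aging above (descriptive only): a runnable thread of
   priority p is forced to be picked once it has not had the CPU for more
   than p * 8 ticks, so e.g. a priority 20 thread waits at most roughly 160
   ticks even while higher-priority threads stay runnable. */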
void sleep_thread(int ticks)
{
    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    cores[CURRENT_CORE].running->statearg =
        SET_STATE(STATE_SLEEPING, current_tick + ticks + 1);
    switch_thread(true, NULL);

    /* Clear all flags to indicate we are up and running again. */
    cores[CURRENT_CORE].running->statearg = 0;
}
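/* Usage sketch (illustrative, not part of the original file): a thread that
   wants to pause for roughly half a second simply calls

       sleep_thread(HZ / 2);

   and stays on the sleeping list until check_sleepers() moves it back to the
   running list. HZ is assumed here to be the kernel tick rate. */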
void block_thread(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    /* At the next task switch the scheduler will immediately change the
     * thread state (and we also force the task switch to happen). */
    if (timeout)
    {
#ifdef THREAD_EXTRA_CHECKS
        /* We can store only one thread in "list", since the thread is also
         * kept on another list (such as the core's list of sleeping tasks). */
        if (*list)
            panicf("Blocking violation T->*B");
#endif

        current->statearg =
            SET_STATE(STATE_BLOCKED_W_TMO, current_tick + timeout);
        *list = current;

        /* Now force a task switch and block until we have been woken up
         * by another thread or the timeout is reached. */
        switch_thread(true, NULL);

        /* If the timeout is reached, we must set list back to NULL here. */
        *list = NULL;
    }
    else
    {
#ifdef THREAD_EXTRA_CHECKS
        /* We are not allowed to mix blocking types in one queue. */
        if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
            panicf("Blocking violation B->*T");
#endif

        current->statearg = SET_STATE(STATE_BLOCKED, 0);

        /* Now force a task switch and block until we have been woken up
         * by another thread. */
        switch_thread(true, list);
    }

    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
}
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
        return ;

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. */
            remove_from_list(list, thread);
            add_to_list(&cores[CURRENT_CORE].running, thread);
            thread->statearg = 0;
            break;

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause the scheduler to immediately
             * wake up the thread. */
            thread->statearg &= 0xC0000000;
            break;

        default:
            /* Nothing to do. The thread has already been woken up, or its
             * state is neither blocked nor blocked with timeout. */
            break ;
    }
}
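/* Usage sketch (illustrative only; the names below are hypothetical, not
   taken from this file): a minimal wait/notify pairing on a caller-owned
   wait list.

       static struct thread_entry *wait_list;

       // waiter: block with no timeout until somebody wakes us
       block_thread(&wait_list, 0);

       // notifier, running in another thread: wake the blocked waiter
       wakeup_thread(&wait_list);
*/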
/*---------------------------------------------------------------------------
 * Create thread on the current core.
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
        create_thread(void (*function)(void), void* stack, int stack_size,
                      const char *name IF_PRIO(, int priority))
{
    return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
                                 name IF_PRIO(, priority));
}
/*---------------------------------------------------------------------------
 * Create thread on a specific core.
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
        create_thread_on_core(unsigned int core, void (*function)(void),
                              void* stack, int stack_size,
                              const char *name IF_PRIO(, int priority))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int n;
    struct regs *regs;
    struct thread_entry *thread;

    /* Find a free slot: an unused entry has no name. */
    for (n = 0; n < MAXTHREADS; n++)
    {
        if (cores[core].threads[n].name == NULL)
            break;
    }

    if (n == MAXTHREADS)
        return NULL;

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &cores[core].threads[n];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority = priority;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, thread);

    regs = &thread->context;
#if defined(CPU_COLDFIRE) || (CONFIG_CPU == SH7034) || defined(CPU_ARM)
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
#elif CONFIG_CPU == TCC730
    /* Align stack on word boundary */
    regs->sp = (void*)(((unsigned long)stack + stack_size - 2) & ~1);
    regs->started = 0;
#endif
    regs->start = (void*)function;

    return thread;
}
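/* Usage sketch (illustrative only; the stack size, names and priority macro
   below are assumptions, not taken from this file):

       static int demo_stack[0x400 / sizeof(int)];

       static void demo_thread(void)
       {
           for (;;)
               sleep_thread(HZ);
       }

       create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                     "demo" IF_PRIO(, PRIORITY_BACKGROUND));
*/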
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing the thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    highest_priority = 100;
#endif

    if (thread == cores[CURRENT_CORE].running)
    {
        remove_from_list(&cores[CURRENT_CORE].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[CURRENT_CORE].sleeping)
        remove_from_list(&cores[CURRENT_CORE].sleeping, thread);
    else
        remove_from_list(NULL, thread);
}
#ifdef HAVE_PRIORITY_SCHEDULING
void thread_set_priority(struct thread_entry *thread, int priority)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    thread->priority = priority;
    highest_priority = 100;
}
#endif
void init_threads(void)
{
    unsigned int core = CURRENT_CORE;

    memset(cores, 0, sizeof cores);
    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].threads[0].name = main_thread_name;
    cores[core].threads[0].statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, &cores[core].threads[0]);

    /* In multiple core setups, each core has a different stack. There is
       probably a much better way to do this. */
    if (core == CPU)
    {
        cores[CPU].threads[0].stack = stackbegin;
        cores[CPU].threads[0].stack_size = (int)stackend - (int)stackbegin;
    }
    else
    {
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        cores[COP].threads[0].stack = cop_stackbegin;
        cores[COP].threads[0].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
#endif
    }
#if CONFIG_CPU == TCC730
    cores[core].threads[0].context.started = 1;
#else
    cores[core].threads[0].context.start = 0; /* thread 0 already running */
#endif
}
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}
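/* Worked example: with an 8192 byte stack whose lowest 2048 bytes still hold
   the DEADBEEF watermark, the loop stops at i == 512, and the function
   returns ((8192 - 512 * 4) * 100) / 8192 == 75, i.e. 75% of the stack has
   been used at some point. */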
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}