/***************************************************************************
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
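
/* Illustrative sketch (not part of the original source): new thread stacks
 * are filled ("painted") with the DEADBEEF pattern so that overflows and
 * peak usage can be detected later by checking which words still hold the
 * pattern.  count_unused_stack_words() is a made-up helper that shows the
 * idea in isolation.
 */
#if 0 /* example only */
static unsigned int count_unused_stack_words(const unsigned int *stack,
                                             unsigned int words)
{
    unsigned int i;

    /* Words still holding the fill pattern have never been written by the
       thread; the first overwritten word marks its deepest stack use. */
    for (i = 0; i < words && stack[i] == DEADBEEF; i++)
        ;
    return i;
}
#endif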
struct core_entry cores[NUM_CORES] IBSS_ATTR;
#ifdef HAVE_PRIORITY_SCHEDULING
static unsigned short highest_priority IBSS_ATTR;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static bool cpu_boosted IBSS_ATTR;
#endif
/* Define to enable additional checks for blocking violations etc. */
// #define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];

extern int cop_stackbegin[];
extern int cop_stackend[];
/* The coprocessor stack is not set up in the bootloader code, but the
   threading is. No threads are run on the coprocessor, so set up some dummy
   stacks for it. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;
static inline void store_context(void* addr)
    __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "stmia %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "ldmia %0, { r4-r11, sp, lr }\n" /* load regs r4 to r14 from context */
        "ldr   r0, [%0, #40]          \n" /* load start pointer */
        "mov   r1, #0                 \n"
        "cmp   r0, r1                 \n" /* check for NULL */
        "strne r1, [%0, #40]          \n" /* if not NULL, clear the start pointer */
        "movne pc, r0                 \n" /* and jump to the thread's entry point */
        : : "r" (addr) : "r0", "r1"
    );
}
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   .running                      \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    ".running:                                 \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
        "mov.l  @%0,r0   \n" /* Get start address */
        "bt     .running \n" /* NULL -> already running */
        "rts             \n" /* Start the thread */
        "mov.l  r0,@%0   \n" /* Clear start address */
        : : "r" (addr) : "r0" /* only! */
#elif CONFIG_CPU == TCC730
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
#define store_context(addr) \
        "ldw @[%0+0], a15\n\t" : : "a" (addr) );
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
#define load_context(addr) \
    if (!(addr)->started) { \
        (addr)->started = 1; \
            "ldw a15, @[%0+0]\n\t" \
            "ldw a14, @[%0+4]\n\t" \
            "jmp a14\n\t" : : "a" (addr) \
            "ldw a15, @[%0+0]\n\t" \
            "pop r1,r0\n\t" : : "a" (addr) \
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
        */
    }
}
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
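
/* Illustrative sketch (not part of the original source): the running and
 * sleeping queues are circular doubly-linked lists threaded through the
 * thread_entry structures themselves, so no separate node allocation is
 * needed.  list_is_consistent() is a made-up helper that checks the
 * invariant the two functions above maintain.
 */
#if 0 /* example only */
static bool list_is_consistent(struct thread_entry *list)
{
    struct thread_entry *t = list;

    if (t == NULL)
        return true;            /* An empty list is just a NULL head pointer. */

    do
    {
        /* Every entry must be reachable both ways from its neighbours. */
        if (t->next->prev != t || t->prev->next != t)
            return false;
        t = t->next;
    } while (t != list);        /* Wrapped around: every entry was checked. */

    return true;
}
#endif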
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    while (current != NULL)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;
        }

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        current = next;
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}
static inline void sleep_core(void)
{
    static long last_tick = 0;

    for (;;)
    {
        if (last_tick != current_tick)
        {
            check_sleepers();
            last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

#ifdef HAVE_SCHEDULER_BOOSTCTRL

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
#ifdef CPU_COLDFIRE
        asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
        asm volatile ("sleep");
#elif CONFIG_CPU == PP5020
        /* This should sleep the CPU. It appears to wake by itself on
           interrupts. */
        CPU_CTL = 0x80000000;
#elif CONFIG_CPU == TCC730
        /* Sleep mode is triggered by the SYS instr on CalmRisc16.
         * Unfortunately, the manual doesn't specify which arg to use:
         *     __asm__ volatile ("sys #0x0f");
         * 0x1f seems to trigger a reset;
         * 0x0f is the only other argument used by Archos. */
#elif CONFIG_CPU == S3C2440
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&cores[CURRENT_CORE].threads[i] == thread)
            return i;
    }

    return -1;
}
void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    remove_from_list(&cores[CURRENT_CORE].running, old);

    /* And put the thread into a new list of inactive threads. */
    if (GET_STATE(old->statearg) == STATE_BLOCKED)
        add_to_list(blocked_list, old);
    else
        add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Reset priorities */
    if (old->priority == highest_priority)
        highest_priority = 100;
#endif
}
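
/* Illustrative note (not part of the original source): statearg packs both a
 * state and an argument into a single word via the SET_STATE/GET_STATE/
 * GET_STATE_ARG macros defined elsewhere.  Judging from the 0xC0000000 mask
 * used in wakeup_thread() below, the state appears to live in the top bits
 * and the argument (for example a sleeping thread's wake-up tick) in the
 * remaining low bits; the exact layout is an assumption here.
 */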
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));

    unsigned int *stackptr;

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

# if CONFIG_CPU != TCC730
        /* Check if the current thread stack has overflowed. */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
            panicf("Stkov %s", cores[CURRENT_CORE].running->name);
# endif

        /* Check if a thread state change has been requested. */
        if (cores[CURRENT_CORE].running->statearg)
        {
            /* Change running thread state and switch to next thread. */
            change_thread_state(blocked_list);
        }
        else
        {
            /* Switch to the next running thread. */
            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < highest_priority)
            highest_priority = priority;

        if (priority == highest_priority || (current_tick
            - cores[CURRENT_CORE].running->last_run > priority * 8))
        {
            break;
        }

        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of the thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
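
    /* Illustrative note (not part of the original source): the selection loop
     * above implements simple aging.  A thread is picked either when its
     * priority value matches the best (numerically lowest) value currently
     * tracked in highest_priority, or when more than priority * 8 ticks have
     * passed since it last ran.  For example, a priority-20 thread that last
     * ran at tick 1000 becomes eligible again at any tick after 1160, even if
     * more urgent threads are still runnable.
     */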
#endif

    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
}
void sleep_thread(int ticks)
{
    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    cores[CURRENT_CORE].running->statearg =
        SET_STATE(STATE_SLEEPING, current_tick + ticks + 1);
    switch_thread(true, NULL);

    /* Clear all flags to indicate we are up and running again. */
    cores[CURRENT_CORE].running->statearg = 0;
}
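
/* Illustrative sketch (not part of the original source): a thread simply
 * calls sleep_thread() to give up the CPU for a number of ticks.  The
 * blink_thread()/toggle_led() names are made up, and the usual HZ tick-rate
 * constant is assumed.
 */
#if 0 /* example only */
static void blink_thread(void)
{
    for (;;)
    {
        toggle_led();           /* hypothetical helper */
        sleep_thread(HZ / 2);   /* give up the CPU for about half a second */
    }
}
#endif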
void block_thread(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    /* At the next task switch the scheduler will immediately change the
     * thread state (and we also force the task switch to happen). */
    if (timeout)
    {
#ifdef THREAD_EXTRA_CHECKS
        /* We can store only one thread to the "list" if thread is used
         * in other list (such as core's list for sleeping tasks). */
        if (*list != NULL)
            panicf("Blocking violation T->*B");
#endif

        current->statearg =
            SET_STATE(STATE_BLOCKED_W_TMO, current_tick + timeout);
        *list = current;

        /* Now force a task switch and block until we have been woken up
         * by another thread or timeout is reached. */
        switch_thread(true, NULL);

        /* If timeout is reached, we must set list back to NULL here. */
        *list = NULL;
    }
    else
    {
#ifdef THREAD_EXTRA_CHECKS
        /* We are not allowed to mix blocking types in one queue. */
        if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
            panicf("Blocking violation B->*T");
#endif

        current->statearg = SET_STATE(STATE_BLOCKED, 0);

        /* Now force a task switch and block until we have been woken up
         * by another thread or timeout is reached. */
        switch_thread(true, list);
    }

    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
}
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
        return ;

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. */
            remove_from_list(list, thread);
            add_to_list(&cores[CURRENT_CORE].running, thread);
            thread->statearg = 0;
            break;

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause scheduler to immediately
             * wake up the thread. */
            thread->statearg &= 0xC0000000;
            break;

        default:
            /* Nothing to do. Thread has already been woken up
             * or its state is not blocked or blocked with timeout. */
            break;
    }
}
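
/* Illustrative sketch (not part of the original source): block_thread() and
 * wakeup_thread() are typically used as a simple wait/signal pair around a
 * shared list head.  The waiter variable and the wait_for_event()/
 * signal_event() names below are made up for the example.
 */
#if 0 /* example only */
static struct thread_entry *waiter = NULL;

static void wait_for_event(void)
{
    /* Block (without timeout) on the shared list head until signalled. */
    block_thread(&waiter, 0);
}

static void signal_event(void)
{
    /* Wake the thread, if any, that is blocked on the list head. */
    wakeup_thread(&waiter);
}
#endif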
/*---------------------------------------------------------------------------
 * Create thread on the current core.
 * Return ID if context area could be allocated, else -1.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority))
{
    return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
                                 name IF_PRIO(, priority));
}
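
/* Illustrative sketch (not part of the original source): a caller provides
 * its own statically allocated stack and a name for the new thread.  The
 * demo_* names are made up, and DEFAULT_STACK_SIZE, HZ and
 * PRIORITY_BACKGROUND are assumed constants from the surrounding firmware
 * headers.
 */
#if 0 /* example only */
static int demo_stack[DEFAULT_STACK_SIZE / sizeof(int)];

static void demo_thread(void)
{
    for (;;)
        sleep_thread(HZ);       /* periodic work would go here */
}

static void start_demo(void)
{
    create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                  "demo" IF_PRIO(, PRIORITY_BACKGROUND));
}
#endif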
/*---------------------------------------------------------------------------
 * Create thread on a specific core.
 * Return ID if context area could be allocated, else -1.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread_on_core(unsigned int core, void (*function)(void),
                          void* stack, int stack_size,
                          const char *name IF_PRIO(, int priority))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int n;
    struct regs *regs;
    struct thread_entry *thread;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (cores[core].threads[n].name == NULL)
            break;
    }

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &cores[core].threads[n];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority = priority;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, thread);

    regs = &thread->context;
#if defined(CPU_COLDFIRE) || (CONFIG_CPU == SH7034) || defined(CPU_ARM)
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
#elif CONFIG_CPU == TCC730
    /* Align stack on word boundary */
    regs->sp = (void*)(((unsigned long)stack + stack_size - 2) & ~1);
#endif
    regs->start = (void*)function;

    return thread;
}
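
/* Illustrative note (not part of the original source): regs->start, set at
 * the end of create_thread_on_core() above, holds the thread's entry point.
 * The first time the new thread is switched in, load_context() finds a
 * non-NULL start pointer, clears it and jumps there instead of returning
 * into a saved register set; that is how a freshly created thread begins
 * execution.
 */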
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    highest_priority = 100;
#endif

    if (thread == cores[CURRENT_CORE].running)
    {
        remove_from_list(&cores[CURRENT_CORE].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[CURRENT_CORE].sleeping)
        remove_from_list(&cores[CURRENT_CORE].sleeping, thread);
    else
        remove_from_list(NULL, thread);
}
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    highest_priority = 100;

    return old_priority;
}
#endif
void init_threads(void)
{
    unsigned int core = CURRENT_CORE;

    memset(cores, 0, sizeof cores);
    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].threads[0].name = main_thread_name;
    cores[core].threads[0].statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
    highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    cpu_boosted = false;
#endif
    add_to_list(&cores[core].running, &cores[core].threads[0]);

    /* In multiple core setups, each core has a different stack. There is
       probably a much better way to do this. */
    cores[CPU].threads[0].stack = stackbegin;
    cores[CPU].threads[0].stack_size = (int)stackend - (int)stackbegin;
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
    cores[COP].threads[0].stack = cop_stackbegin;
    cores[COP].threads[0].stack_size = (int)cop_stackend - (int)cop_stackbegin;
#endif

#if CONFIG_CPU == TCC730
    cores[core].threads[0].context.started = 1;
#else
    cores[core].threads[0].context.start = 0; /* thread 0 already running */
#endif
}
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
        thread->stack_size;
}
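
/* Illustrative note (not part of the original source): the loop in
 * thread_stack_usage() above relies on the DEADBEEF stack painting done in
 * create_thread_on_core().  The first word (counting from the bottom of the
 * stack) that no longer holds the pattern marks the deepest point the thread
 * has reached, so the result is the percentage of the stack that has ever
 * been used.  For example, with a 1024-byte stack whose first 64 words
 * (256 bytes) are still DEADBEEF, the reported usage is
 * ((1024 - 256) * 100) / 1024 = 75 percent.
 */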
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}