firmware/thread.c
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |    _// _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    <  | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "sprintf.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif

#if NUM_CORES > 1
# define IF_COP2(x) x
#else
# define IF_COP2(x) CURRENT_CORE
#endif

#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */

struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif

/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];

#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the threading
 * is. No threads are run on the coprocessor, so set up some dummy stack */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif

#if NUM_CORES > 1
#if 0
static long cores_locked IBSS_ATTR;

#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
#define UNLOCK(...) cores_locked = 0
#endif

/* #warning "Core locking mechanism should be fixed on H10/4G!" */

inline void lock_cores(void)
{
#if 0
    if (!cores[CURRENT_CORE].lock_issued)
    {
        LOCK();
        cores[CURRENT_CORE].lock_issued = true;
    }
#endif
}

inline void unlock_cores(void)
{
#if 0
    if (cores[CURRENT_CORE].lock_issued)
    {
        cores[CURRENT_CORE].lock_issued = false;
        UNLOCK();
    }
#endif
}

#endif

/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;

static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
static inline void core_sleep(void) __attribute__((always_inline));

#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static void start_thread(void (*thread_func)(void), const void* addr) __attribute__((naked,used));
static void start_thread(void (*thread_func)(void), const void* addr)
{
    /* r0 = thread_func, r1 = addr */
#if NUM_CORES > 1 && CONFIG_CPU != PP5002
    asm volatile (
        "mov    r2, #0          \n"
        "str    r2, [r1, #40]   \n"
        "ldr    r1, =0xf000f044 \n" /* invalidate this core's cache */
        "ldr    r2, [r1]        \n"
        "orr    r2, r2, #6      \n"
        "str    r2, [r1]        \n"
        "ldr    r1, =0x6000c000 \n"
    "1:                         \n"
        "ldr    r2, [r1]        \n"
        "tst    r2, #0x8000     \n"
        "bne    1b              \n"
        "mov    pc, r0          \n"
        : : : "r1", "r2"
    );
#else
    asm volatile (
        "mov    r2, #0          \n"
        "str    r2, [r1, #40]   \n"
        "mov    pc, r0          \n"
        : : : "r1", "r2"
    );
#endif
    (void)thread_func;
    (void)addr;
}

static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "cmp    r0, #0                 \n" /* check for NULL */
        "movne  r1, %0                 \n" /* if not already running, jump to start */
        "ldrne  pc, =start_thread      \n"
        : : "r" (addr) : "r0", "r1"
    );
}

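/* A note on the start trampoline above: create_thread() stores the entry
 * point in regs->start, which on ARM sits at offset 40 in the context,
 * directly after the ten registers saved by store_context (r4-r11, sp, lr).
 * When load_context finds a non-NULL start pointer it branches to
 * start_thread(), which clears the start field (so subsequent switches
 * simply resume the thread), on multi-core PP targets invalidates this
 * core's cache and waits for the cache controller to finish, and finally
 * jumps to the thread's entry point. */
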
#if defined (CPU_PP)
static inline void core_sleep(void)
{
    unlock_cores();

    /* This should sleep the CPU. It appears to wake by itself on
       interrupts */
    if (CURRENT_CORE == CPU)
        CPU_CTL = PROC_SLEEP;
    else
        COP_CTL = PROC_SLEEP;

    lock_cores();
}
#elif CONFIG_CPU == S3C2440
static inline void core_sleep(void)
{
    int i;
    CLKCON |= (1 << 2); /* set IDLE bit */
    for(i=0; i<10; i++); /* wait for IDLE */
    CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
}
#endif

#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   1f                            \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    "1:                                        \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

static inline void core_sleep(void)
{
    asm volatile ("stop #0x2000");
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
   since that's what'll be the most useful for most things which the dsp
   will do. Codecs should still initialize their preferred modes
   explicitly. */
#define THREAD_CPU_INIT(core, thread) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })

#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0   \n"
        "sts.l   pr, @-%0 \n"
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @%0+,r8   \n"
        "mov.l  @%0+,r9   \n"
        "mov.l  @%0+,r10  \n"
        "mov.l  @%0+,r11  \n"
        "mov.l  @%0+,r12  \n"
        "mov.l  @%0+,r13  \n"
        "mov.l  @%0+,r14  \n"
        "mov.l  @%0+,r15  \n"
        "lds.l  @%0+,pr   \n"
        "mov.l  @%0,r0    \n" /* Get start address */
        "tst    r0,r0     \n"
        "bt     .running  \n" /* NULL -> already running */
        "lds    r0,pr     \n"
        "mov    #0,r0     \n"
        "rts              \n" /* Start the thread */
        "mov.l  r0,@%0    \n" /* Clear start address */
    ".running:            \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

static inline void core_sleep(void)
{
    and_b(0x7F, &SBYCR);
    asm volatile ("sleep");
}
#endif

#ifndef THREAD_CPU_INIT
/* No cpu specific init - make empty */
#define THREAD_CPU_INIT(core, thread)
#endif

#ifdef THREAD_EXTRA_CHECKS
static void thread_panicf_format_name(char *buffer, struct thread_entry *thread)
{
    *buffer = '\0';
    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *fmt = thread->name ? " %s" : " %08lX";
        intptr_t name = thread->name ?
            (intptr_t)thread->name : (intptr_t)thread;
        snprintf(buffer, 16, fmt, name);
    }
}

static void thread_panicf(const char *msg,
        struct thread_entry *thread1, struct thread_entry *thread2)
{
    static char thread1_name[16], thread2_name[16];
    thread_panicf_format_name(thread1_name, thread1);
    thread_panicf_format_name(thread2_name, thread2);
    panicf ("%s%s%s", msg, thread1_name, thread2_name);
}
#else
static void thread_stkov(void)
{
    /* Display thread name if one or ID if none */
    struct thread_entry *current = cores[CURRENT_CORE].running;
    const char *fmt = current->name ? "%s %s" : "%s %08lX";
    intptr_t name = current->name ?
        (intptr_t)current->name : (intptr_t)current;
    panicf(fmt, "Stkov", name);
}
#endif /* THREAD_EXTRA_CHECKS */

static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
        */
    }
}

static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}

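/* A note on the list primitives above: each list is a circular, doubly
 * linked ring with *list pointing at the head. A lone thread links to
 * itself in both directions, and new threads are inserted at
 * (*list)->prev, i.e. at the tail of the ring, which is what gives the
 * scheduler its round-robin order. */
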
static void check_sleepers(void) __attribute__ ((noinline));
static void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    if (current == NULL)
        return ;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);
            current->statearg = 0;

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}

/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void) __attribute__ ((noinline));
static void wake_list_awaken(void)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */

    struct thread_entry *waking = cores[CURRENT_CORE].waking;
    struct thread_entry *running = cores[CURRENT_CORE].running;

    if (running != NULL)
    {
        /* Place waking threads at the end of the running list. */
        struct thread_entry *tmp;
        waking->prev->next  = running;
        running->prev->next = waking;
        tmp                 = running->prev;
        running->prev       = waking->prev;
        waking->prev        = tmp;
    }
    else
    {
        /* Just transfer the list as-is - just came out of a core
         * sleep. */
        cores[CURRENT_CORE].running = waking;
    }

    /* Done with waking list */
    cores[CURRENT_CORE].waking = NULL;
    set_irq_level(oldlevel);
}

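/* A note on the splice above: "waking" and "running" are both circular
 * rings, so appending one to the other only requires redirecting the two
 * tail->next pointers and swapping the two head->prev pointers. IRQs are
 * masked for just those few pointer updates, so newly woken threads become
 * visible to the scheduler in one short critical section. */
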
static inline void sleep_core(void)
{
    for (;;)
    {
        /* We want to do these ASAP as it may change the decision to sleep
           the core or the core has woken because an interrupt occurred
           and posted a message to a queue. */
        if (cores[CURRENT_CORE].waking != NULL)
            wake_list_awaken();

        if (cores[CURRENT_CORE].last_tick != current_tick)
        {
            check_sleepers();
            cores[CURRENT_CORE].last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
        core_sleep();
    }
}

#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif

static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline));
static void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;
    unsigned long new_state;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    new_state = GET_STATE(old->statearg);

    /* Check if a thread state change has been requested. */
    if (new_state)
    {
        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[CURRENT_CORE].running, old);

        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
        else
            add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == cores[CURRENT_CORE].highest_priority)
            cores[CURRENT_CORE].highest_priority = 100;
#endif
    }
    else
        /* Switch to the next running thread. */
        cores[CURRENT_CORE].running = old->next;
}

/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    lock_cores();

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

        /* Check if the current thread stack is overflown */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
#ifdef THREAD_EXTRA_CHECKS
            thread_panicf("Stkov", cores[CURRENT_CORE].running, NULL);
#else
            thread_stkov();
#endif

        /* Rearrange thread lists as needed */
        change_thread_state(blocked_list);

        /* This has to be done after the scheduler is finished with the
           blocked_list pointer so that an IRQ can't kill us by attempting
           a wake but before attempting any core sleep. */
        if (cores[CURRENT_CORE].switch_to_irq_level != STAY_IRQ_LEVEL)
        {
            int level = cores[CURRENT_CORE].switch_to_irq_level;
            cores[CURRENT_CORE].switch_to_irq_level = STAY_IRQ_LEVEL;
            set_irq_level(level);
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < cores[CURRENT_CORE].highest_priority)
            cores[CURRENT_CORE].highest_priority = priority;

        if (priority == cores[CURRENT_CORE].highest_priority ||
            (current_tick - cores[CURRENT_CORE].running->last_run >
             priority * 8) ||
            cores[CURRENT_CORE].running->priority_x != 0)
        {
            break;
        }

        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
#endif

#endif
    unlock_cores();

    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
#endif
}

void sleep_thread(int ticks)
{
    struct thread_entry *current;

    lock_cores();

    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

    /* Set the thread's new state and timeout, and finally force a task
     * switch so that the scheduler removes the thread from the list of
     * running processes and puts it in the list of sleeping tasks. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);

    switch_thread(true, NULL);
}

void block_thread(struct thread_entry **list)
{
    struct thread_entry *current;

    lock_cores();

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* We are not allowed to mix blocking types in one queue. */
    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
        thread_panicf("Blocking violation B->*T", current, *list);
#endif

    /* Set the state to blocked and ask the scheduler to switch tasks,
     * this takes us off of the run queue until we are explicitly woken */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);

    switch_thread(true, list);

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
#else
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
#endif
}

void block_thread_w_tmo(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    lock_cores();
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation, whatever we are waiting
     * for _may or may not_ happen, regardless of boost state, (user input
     * for instance), so this thread no longer needs to boost */
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* We can store only one thread to the "list" if the thread is used
     * in another list (such as the core's list of sleeping tasks). */
    if (*list)
        thread_panicf("Blocking violation T->*B", current, NULL);
#endif

    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);

    /* Set the "list" for explicit wakeup */
    *list = current;

    /* Now force a task switch and block until we have been woken up
     * by another thread or the timeout is reached. */
    switch_thread(true, NULL);

    /* It is now safe for another thread to block on this "list" */
    *list = NULL;
}

#if !defined(SIMULATOR)
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread(list);
}

void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
}
#endif

void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
    {
        return ;
    }

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. List removal
             * is safe since each object maintains its own list of
             * sleepers and queues protect against reentrancy. */
            remove_from_list(list, thread);
            add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause scheduler to immediately
             * wake up the thread. */
            thread->statearg = 0;
            break;

        default:
            /* Nothing to do. Thread has already been woken up
             * or its state is not blocked or blocked with timeout. */
            return ;
    }
}

inline static int find_empty_thread_slot(void)
{
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (threads[n].name == NULL)
            return n;
    }

    return -1;
}

/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
   before calling. */
void wakeup_thread_irq_safe(struct thread_entry **list)
{
    struct core_entry *core = &cores[CURRENT_CORE];
    /* Switch wakeup lists and call wakeup_thread */
    core->wakeup_list = &core->waking;
    wakeup_thread(list);
    /* Switch back to normal running list */
    core->wakeup_list = &core->running;
}

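/* A note on the waking list: wakeup_thread() adds threads to whichever ring
 * core->wakeup_list points at, which is normally the running ring. When the
 * caller holds IRQs disabled - for example a wakeup posted from interrupt
 * context - wakeup_thread_irq_safe() redirects wakeup_list to the separate
 * "waking" ring so the running ring is not modified behind the scheduler's
 * back; sleep_core() later merges it back in via wake_list_awaken(). */
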
/*---------------------------------------------------------------------------
 * Create a thread
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority)
                  IF_COP(, unsigned int core, bool fallback))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int slot;
    struct regs *regs;
    struct thread_entry *thread;

/*****
 * Ugly code alert!
 * To prevent ifdef hell while keeping the binary size down, we define
 * core here if it hasn't been passed as a parameter
 *****/
#if NUM_CORES == 1
#define core CPU
#endif

#if NUM_CORES > 1
    /* If the kernel hasn't initialised on the COP (most likely due to an old
     * bootloader) then refuse to start threads on the COP
     */
    if ((core == COP) && !cores[core].kernel_running)
    {
        if (fallback)
            return create_thread(function, stack, stack_size, name
                                 IF_PRIO(, priority) IF_COP(, CPU, false));
        else
            return NULL;
    }
#endif

    lock_cores();

    slot = find_empty_thread_slot();
    if (slot < 0)
    {
        unlock_cores();
        return NULL;
    }

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &threads[slot];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = 0;
    thread->priority = priority;
    cores[core].highest_priority = 100;
#endif

#if NUM_CORES > 1
    thread->core = core;
#endif

    regs = &thread->context;
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
    regs->start = (void*)function;

    /* Do any CPU specific inits after initializing common items
       to have access to valid data */
    THREAD_CPU_INIT(core, thread);

    add_to_list(&cores[core].running, thread);
    unlock_cores();

    return thread;
#if NUM_CORES == 1
#undef core
#endif
}

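/* Illustrative sketch (not part of this file's build): roughly how a caller
 * elsewhere in the firmware might start a worker thread with the API above.
 * The thread function, stack array and name below are made-up examples, and
 * the sketch assumes the usual DEFAULT_STACK_SIZE, PRIORITY_BACKGROUND and
 * HZ definitions from config.h/thread.h/kernel.h. */
#if 0
static long demo_stack[DEFAULT_STACK_SIZE/sizeof(long)];
static const char demo_thread_name[] = "demo";

static void demo_thread(void)
{
    for (;;)
    {
        /* ...do some periodic background work... */
        sleep_thread(HZ); /* then sleep for roughly one second of ticks */
    }
}

static void demo_init(void)
{
    /* The priority and core/fallback arguments only exist on builds with
       HAVE_PRIORITY_SCHEDULING and NUM_CORES > 1, hence the IF_PRIO/IF_COP
       wrappers, exactly as in the prototype above. */
    create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                  demo_thread_name IF_PRIO(, PRIORITY_BACKGROUND)
                  IF_COP(, CPU, false));
}
#endif
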
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    lock_cores();

    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
    {
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
        {
            cpu_boost(true);
        }
        boosted_threads++;
    }

    unlock_cores();
}
#endif

/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    lock_cores();

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[IF_COP2(thread->core)].highest_priority = 100;
#endif

    if (thread == cores[IF_COP2(thread->core)].running)
    {
        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[IF_COP2(thread->core)].sleeping)
        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
    else
        remove_from_list(NULL, thread);

    unlock_cores();
}

#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    lock_cores();
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    cores[IF_COP2(thread->core)].highest_priority = 100;
    unlock_cores();

    return old_priority;
}

int thread_get_priority(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->priority;
}

void priority_yield(void)
{
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;
    switch_thread(true, NULL);
    thread->priority_x = 0;
}
#endif /* HAVE_PRIORITY_SCHEDULING */

struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}

void init_threads(void)
{
    unsigned int core = CURRENT_CORE;
    int slot;

    /* Let main CPU initialize first. */
#if NUM_CORES > 1
    if (core != CPU)
    {
        while (!cores[CPU].kernel_running) ;
    }
#endif

    lock_cores();
    slot = find_empty_thread_slot();

    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].waking = NULL;
    cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif
    threads[slot].name = main_thread_name;
    threads[slot].statearg = 0;
    threads[slot].context.start = 0; /* core's main thread already running */
#if NUM_CORES > 1
    threads[slot].core = core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    threads[slot].priority = PRIORITY_USER_INTERFACE;
    threads[slot].priority_x = 0;
    cores[core].highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    boosted_threads = 0;
#endif
    add_to_list(&cores[core].running, &threads[slot]);

    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
    if (core == CPU)
    {
        threads[slot].stack = stackbegin;
        threads[slot].stack_size = (int)stackend - (int)stackbegin;
    }
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
    else
    {
        threads[slot].stack = cop_stackbegin;
        threads[slot].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
    }

    cores[core].kernel_running = true;
#endif

    unlock_cores();
}

int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}

int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}

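/* Illustrative sketch (not part of this file's build): how a debug screen
 * might report per-thread stack usage from the DEADBEEF watermark that
 * create_thread() paints and thread_stack_usage() measures. Output handling
 * is left as a placeholder. */
#if 0
static void dump_thread_stacks(void)
{
    char buf[32];
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (threads[i].name == NULL)
            continue; /* unused slot */

        snprintf(buf, sizeof(buf), "%s: %d%%",
                 threads[i].name, thread_stack_usage(&threads[i]));
        /* ...show buf on whatever display or log the caller has... */
    }
}
#endif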