/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "sprintf.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
#if NUM_CORES > 1
# define IF_COP2(x) x
#else
# define IF_COP2(x) CURRENT_CORE
#endif
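
/* Illustration: IF_COP2() collapses per-core indexing on single-core builds.
 * With NUM_CORES > 1, cores[IF_COP2(thread->core)] indexes the thread's own
 * core; on a single-core build it always resolves to cores[CURRENT_CORE]. */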
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif

/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];
#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but threading
 * is. No threads are run on the coprocessor, so point it at a dummy stack. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif
#if NUM_CORES > 1
#if 0
static long cores_locked IBSS_ATTR;

#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
#define UNLOCK(...) cores_locked = 0
#endif

/* #warning "Core locking mechanism should be fixed on H10/4G!" */

inline void lock_cores(void)
{
#if 0
    if (!cores[CURRENT_CORE].lock_issued)
    {
        LOCK();
        cores[CURRENT_CORE].lock_issued = true;
    }
#endif
}

inline void unlock_cores(void)
{
#if 0
    if (cores[CURRENT_CORE].lock_issued)
    {
        cores[CURRENT_CORE].lock_issued = false;
        UNLOCK();
    }
#endif
}

#endif
/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;

static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
static inline void core_sleep(void) __attribute__((always_inline));
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static void start_thread(void (*thread_func)(void), const void* addr) __attribute__((naked));
static void start_thread(void (*thread_func)(void), const void* addr)
{
    /* r0 = thread_func, r1 = addr */
#if NUM_CORES > 1 && CONFIG_CPU != PP5002
    asm volatile (
        "mov    r2, #0          \n"
        "str    r2, [r1, #40]   \n"
        "ldr    r1, =0xf000f044 \n" /* invalidate this core's cache */
        "ldr    r2, [r1]        \n"
        "orr    r2, r2, #6      \n"
        "str    r2, [r1]        \n"
        "ldr    r1, =0x6000c000 \n"
    "1:                         \n"
        "ldr    r2, [r1]        \n"
        "tst    r2, #0x8000     \n"
        "bne    1b              \n"
        "mov    pc, r0          \n"
        : : : "r1", "r2"
    );
#else
    asm volatile (
        "mov    r2, #0          \n"
        "str    r2, [r1, #40]   \n"
        "mov    pc, r0          \n"
        : : : "r1", "r2"
    );
#endif
    (void)thread_func;
    (void)addr;
    (void)start_thread;
}
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "cmp    r0, #0                 \n" /* check for NULL */
        "movne  r1, %0                 \n" /* if not already running, jump to start */
        "ldrne  pc, =start_thread      \n"
        : : "r" (addr) : "r0", "r1"
    );
}
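
/* Context layout note (ARM): store_context saves r4-r11, sp and lr, i.e. ten
 * 32-bit words, so the word at offset 40 is the "start" slot of struct regs.
 * load_context only branches to start_thread while that slot is non-NULL;
 * start_thread clears it ("str r2, [r1, #40]"), so later restores of the same
 * context simply resume where the thread was switched out. */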
#if defined (CPU_PP)
static inline void core_sleep(void)
{
    unlock_cores();

    /* This should sleep the CPU. It appears to wake by itself on
       interrupts */
    if (CURRENT_CORE == CPU)
        CPU_CTL = PROC_SLEEP;
    else
        COP_CTL = PROC_SLEEP;

    lock_cores();
}
#elif CONFIG_CPU == S3C2440
static inline void core_sleep(void)
{
    int i;
    CLKCON |= (1 << 2); /* set IDLE bit */
    for(i=0; i<10; i++); /* wait for IDLE */
    CLKCON &= ~(1 << 2); /* reset IDLE bit when waking up */
}
#endif
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   1f                            \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    "1:                                        \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
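
/* Context layout note (Coldfire): the movem.l stores macsr plus d2-d7 and
 * a2-a7, thirteen 32-bit words in total, so offset 52 is the "start" slot.
 * As on ARM, a non-NULL start address means the thread has never run; the
 * slot is cleared before jumping so the next load_context falls through. */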
static inline void core_sleep(void)
{
    asm volatile ("stop #0x2000");
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
   since that's what'll be the most useful for most things which the dsp
   will do. Codecs should still initialize their preferred modes
   explicitly. */
#define THREAD_CPU_INIT(core, thread) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0   \n"
        "sts.l   pr, @-%0 \n"
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @%0+,r8  \n"
        "mov.l  @%0+,r9  \n"
        "mov.l  @%0+,r10 \n"
        "mov.l  @%0+,r11 \n"
        "mov.l  @%0+,r12 \n"
        "mov.l  @%0+,r13 \n"
        "mov.l  @%0+,r14 \n"
        "mov.l  @%0+,r15 \n"
        "lds.l  @%0+,pr  \n"
        "mov.l  @%0,r0   \n" /* Get start address */
        "tst    r0,r0    \n"
        "bt     .running \n" /* NULL -> already running */
        "lds    r0,pr    \n"
        "mov    #0,r0    \n"
        "rts             \n" /* Start the thread */
        "mov.l  r0,@%0   \n" /* Clear start address */
    ".running:           \n"
        : : "r" (addr) : "r0" /* only! */
    );
}
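
/* Context layout note (SH7034): r8-r15 and pr make nine 32-bit words, so the
 * word at offset 36 holds the "start" address; store_context seeks to
 * addr+36 and stores downwards with pre-decrement. On first dispatch the
 * "mov.l r0,@%0" in the rts delay slot writes the zero loaded into r0 just
 * above, clearing the start slot so later restores resume normally. */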
static inline void core_sleep(void)
{
    and_b(0x7F, &SBYCR);
    asm volatile ("sleep");
}

#endif

#ifndef THREAD_CPU_INIT
/* No cpu specific init - make empty */
#define THREAD_CPU_INIT(core, thread)
#endif
#ifdef THREAD_EXTRA_CHECKS
static void thread_panicf_format_name(char *buffer, struct thread_entry *thread)
{
    *buffer = '\0';
    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *fmt = thread->name ? " %s" : " %08lX";
        intptr_t name = thread->name ?
            (intptr_t)thread->name : (intptr_t)thread;
        snprintf(buffer, 16, fmt, name);
    }
}

static void thread_panicf(const char *msg,
        struct thread_entry *thread1, struct thread_entry *thread2)
{
    static char thread1_name[16], thread2_name[16];
    thread_panicf_format_name(thread1_name, thread1);
    thread_panicf_format_name(thread2_name, thread2);
    panicf ("%s%s%s", msg, thread1_name, thread2_name);
}
#else
static void thread_stkov(void)
{
    /* Display thread name if one or ID if none */
    struct thread_entry *current = cores[CURRENT_CORE].running;
    const char *fmt = current->name ? "%s %s" : "%s %08lX";
    intptr_t name = current->name ?
        (intptr_t)current->name : (intptr_t)current;
    panicf(fmt, "Stkov", name);
}
#endif /* THREAD_EXTRA_CHECKS */
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
        */
    }
}
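
/* The run/sleep/block lists are circular and doubly linked: *list points at
 * the head and (*list)->prev is the tail, so the "insert last" path above
 * appends the new thread just before the head without needing a separate
 * tail pointer. The commented-out "insert next" variant would instead place
 * the new thread immediately after the head. */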
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    if (current == NULL)
        return ;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);
            current->statearg = 0;

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}
/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */

    struct thread_entry *waking = cores[CURRENT_CORE].waking;
    struct thread_entry *running = cores[CURRENT_CORE].running;

    if (running != NULL)
    {
        /* Place waking threads at the end of the running list. */
        struct thread_entry *tmp;
        waking->prev->next  = running;
        running->prev->next = waking;
        tmp                 = running->prev;
        running->prev       = waking->prev;
        waking->prev        = tmp;
    }
    else
    {
        /* Just transfer the list as-is - just came out of a core
         * sleep. */
        cores[CURRENT_CORE].running = waking;
    }

    /* Done with waking list */
    cores[CURRENT_CORE].waking = NULL;
    set_irq_level(oldlevel);
}
static inline void sleep_core(void)
{
    for (;;)
    {
        /* We want to do these ASAP as it may change the decision to sleep
           the core or the core has woken because an interrupt occurred
           and posted a message to a queue. */
        if (cores[CURRENT_CORE].waking != NULL)
            wake_list_awaken();

        if (cores[CURRENT_CORE].last_tick != current_tick)
        {
            check_sleepers();
            cores[CURRENT_CORE].last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
        core_sleep();
    }
}
#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;
    unsigned long new_state;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    new_state = GET_STATE(old->statearg);

    /* Check if a thread state change has been requested. */
    if (new_state)
    {
        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[CURRENT_CORE].running, old);

        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
        else
            add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == cores[CURRENT_CORE].highest_priority)
            cores[CURRENT_CORE].highest_priority = 100;
#endif
    }
    else
    {
        /* Switch to the next running thread. */
        cores[CURRENT_CORE].running = old->next;
    }
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    lock_cores();

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

        /* Check if the current thread stack has overflowed */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
#ifdef THREAD_EXTRA_CHECKS
            thread_panicf("Stkov", cores[CURRENT_CORE].running, NULL);
#else
            thread_stkov();
#endif

        /* Rearrange thread lists as needed */
        change_thread_state(blocked_list);

        /* This has to be done after the scheduler is finished with the
           blocked_list pointer so that an IRQ can't kill us by attempting
           a wake but before attempting any core sleep. */
        if (cores[CURRENT_CORE].switch_to_irq_level != STAY_IRQ_LEVEL)
        {
            int level = cores[CURRENT_CORE].switch_to_irq_level;
            cores[CURRENT_CORE].switch_to_irq_level = STAY_IRQ_LEVEL;
            set_irq_level(level);
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < cores[CURRENT_CORE].highest_priority)
            cores[CURRENT_CORE].highest_priority = priority;

        if (priority == cores[CURRENT_CORE].highest_priority ||
            (current_tick - cores[CURRENT_CORE].running->last_run >
             priority * 8) ||
            cores[CURRENT_CORE].running->priority_x != 0)
        {
            break;
        }

        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
#endif

#endif
    unlock_cores();

    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
#endif
}
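
/* Scheduling note for the HAVE_PRIORITY_SCHEDULING loop in switch_thread()
 * above: a thread is picked if it holds the highest (numerically lowest)
 * priority seen this pass, if it has waited more than priority * 8 ticks
 * since it last ran (ageing, so low-priority threads cannot starve), or if
 * priority_x is set by priority_yield(). Otherwise the round robin simply
 * advances to the next runnable thread. */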
void sleep_thread(int ticks)
{
    struct thread_entry *current;

    lock_cores();

    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);

    switch_thread(true, NULL);
}
void block_thread(struct thread_entry **list)
{
    struct thread_entry *current;

    lock_cores();

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* We are not allowed to mix blocking types in one queue. */
    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
        thread_panicf("Blocking violation B->*T", current, *list);
#endif

    /* Set the state to blocked and ask the scheduler to switch tasks,
     * this takes us off of the run queue until we are explicitly woken */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);

    switch_thread(true, list);

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
#else
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
#endif
}
void block_thread_w_tmo(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    lock_cores();
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation, whatever we are waiting
     * for _may or may not_ happen, regardless of boost state, (user input
     * for instance), so this thread no longer needs to boost */
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* Only one thread may be stored in the "list" at a time, since the
     * thread is also kept on another list (such as the core's list of
     * sleeping tasks). */
    if (*list)
        thread_panicf("Blocking violation T->*B", current, NULL);
#endif

    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);

    /* Set the "list" for explicit wakeup */
    *list = current;

    /* Now force a task switch and block until we have been woken up
     * by another thread or the timeout is reached. */
    switch_thread(true, NULL);

    /* It is now safe for another thread to block on this "list" */
    *list = NULL;
}
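
/* Illustrative use of the blocking primitives above (a sketch, not code from
 * this file; data_ready(), handle_data() and publish_data() are hypothetical):
 *
 *     static struct thread_entry *waiter = NULL;
 *
 *     void consumer(void)          // runs in one thread
 *     {
 *         while (!data_ready())
 *             block_thread(&waiter);
 *         handle_data();
 *     }
 *
 *     void producer(void)          // runs in another thread
 *     {
 *         publish_data();
 *         wakeup_thread(&waiter);
 *     }
 *
 * For wakeups issued while IRQs are disabled, wakeup_thread_irq_safe() below
 * is the variant provided; the scheduler then splices the waking list back
 * into the running list in wake_list_awaken(). */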
#if !defined(SIMULATOR)
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread(list);
}

void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
}
#endif
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
    {
        return ;
    }

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. List removal
             * is safe since each object maintains its own list of
             * sleepers and queues protect against reentrancy. */
            remove_from_list(list, thread);
            add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);
            /* Fall through to clear statearg as well. */

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause scheduler to immediately
             * wake up the thread. */
            thread->statearg = 0;
            break;

        default:
            /* Nothing to do. Thread has already been woken up
             * or its state is not blocked or blocked with timeout. */
            return ;
    }
}
inline static int find_empty_thread_slot(void)
{
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (threads[n].name == NULL)
            return n;
    }

    return -1;
}
/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
   before calling. */
void wakeup_thread_irq_safe(struct thread_entry **list)
{
    struct core_entry *core = &cores[CURRENT_CORE];
    /* Switch wakeup lists and call wakeup_thread */
    core->wakeup_list = &core->waking;
    wakeup_thread(list);
    /* Switch back to normal running list */
    core->wakeup_list = &core->running;
}
/*---------------------------------------------------------------------------
 * Create a thread.
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created.
 * Returns the thread ID if the context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority)
                  IF_COP(, unsigned int core, bool fallback))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int slot;
    struct regs *regs;
    struct thread_entry *thread;

/*****
 * Ugly code alert!
 * To prevent ifdef hell while keeping the binary size down, we define
 * core here if it hasn't been passed as a parameter
 *****/
#if NUM_CORES == 1
#define core CPU
#endif

#if NUM_CORES > 1
    /* If the kernel hasn't initialised on the COP (most likely due to an old
     * bootloader) then refuse to start threads on the COP */
    if ((core == COP) && !cores[core].kernel_running)
    {
        if (fallback)
            return create_thread(function, stack, stack_size, name
                                 IF_PRIO(, priority) IF_COP(, CPU, false));
        else
            return NULL;
    }
#endif

    lock_cores();

    slot = find_empty_thread_slot();
    if (slot < 0)
    {
        unlock_cores();
        return NULL;
    }

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &threads[slot];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = 0;
    thread->priority = priority;
    cores[core].highest_priority = 100;
#endif

#if NUM_CORES > 1
    thread->core = core;
#endif

    regs = &thread->context;
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
    regs->start = (void*)function;

    /* Do any CPU specific inits after initializing common items
       to have access to valid data */
    THREAD_CPU_INIT(core, thread);

    add_to_list(&cores[core].running, thread);
    unlock_cores();

    return thread;
#if NUM_CORES == 1
#undef core
#endif
}
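
/* Example use of create_thread() (a sketch; the stack symbol, stack size and
 * priority below are illustrative, not taken from this file):
 *
 *     static long demo_stack[1024/sizeof(long)];
 *
 *     static void demo_thread(void)
 *     {
 *         for (;;)
 *             sleep_thread(HZ);    // wake roughly once per second
 *     }
 *
 *     ...
 *     create_thread(demo_thread, demo_stack, sizeof(demo_stack), "demo"
 *                   IF_PRIO(, PRIORITY_BACKGROUND) IF_COP(, CPU, false));
 *
 * The IF_PRIO()/IF_COP() wrappers drop their arguments on builds without
 * priority scheduling or a second core, so one call site compiles on all
 * targets. */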
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    lock_cores();

    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
    {
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
        {
            cpu_boost(true);
        }
        boosted_threads++;
    }

    unlock_cores();
}
#endif
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    lock_cores();

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[IF_COP2(thread->core)].highest_priority = 100;
#endif

    if (thread == cores[IF_COP2(thread->core)].running)
    {
        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[IF_COP2(thread->core)].sleeping)
        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
    else
        remove_from_list(NULL, thread);

    unlock_cores();
}
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    lock_cores();
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    cores[IF_COP2(thread->core)].highest_priority = 100;
    unlock_cores();

    return old_priority;
}

int thread_get_priority(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->priority;
}

void priority_yield(void)
{
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;
    switch_thread(true, NULL);
    thread->priority_x = 0;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}
void init_threads(void)
{
    unsigned int core = CURRENT_CORE;
    int slot;

    /* Let main CPU initialize first. */
#if NUM_CORES > 1
    if (core != CPU)
    {
        while (!cores[CPU].kernel_running) ;
    }
#endif

    lock_cores();
    slot = find_empty_thread_slot();

    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].waking = NULL;
    cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif
    threads[slot].name = main_thread_name;
    threads[slot].statearg = 0;
    threads[slot].context.start = 0; /* core's main thread already running */
#if NUM_CORES > 1
    threads[slot].core = core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    threads[slot].priority = PRIORITY_USER_INTERFACE;
    threads[slot].priority_x = 0;
    cores[core].highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    boosted_threads = 0;
#endif
    add_to_list(&cores[core].running, &threads[slot]);

    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
    if (core == CPU)
    {
        threads[slot].stack = stackbegin;
        threads[slot].stack_size = (int)stackend - (int)stackbegin;
    }
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
    else
    {
        threads[slot].stack = cop_stackbegin;
        threads[slot].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
    }

    cores[core].kernel_running = true;
#endif

    unlock_cores();
}
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}
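
/* The stack was filled with DEADBEEF words in create_thread(), and stacks
 * grow downwards, so the first non-DEADBEEF word found while scanning up
 * from the low end marks the deepest point the thread has reached. The
 * result is the used portion as a percentage of the whole stack: for a
 * 4096-byte stack whose first 256 words (1024 bytes) are still DEADBEEF,
 * the function returns ((4096 - 1024) * 100) / 4096 = 75. */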
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}