/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "sprintf.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
#if NUM_CORES > 1
# define IF_COP2(x) x
#else
# define IF_COP2(x) CURRENT_CORE
#endif
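
/* On single-core builds IF_COP2() ignores its argument and collapses to
 * CURRENT_CORE, so expressions such as cores[IF_COP2(thread->core)] used
 * below stay valid even when thread_entry carries no per-thread core field. */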
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif
/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];
#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the
 * threading is. No threads are run on the coprocessor, so set up a dummy
 * stack for it. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif
#if NUM_CORES > 1
#if 0
static long cores_locked IBSS_ATTR;

#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
#define UNLOCK(...) cores_locked = 0
#endif

/* #warning "Core locking mechanism should be fixed on H10/4G!" */

inline void lock_cores(void)
{
#if 0
    if (!cores[CURRENT_CORE].lock_issued)
    {
        LOCK();
        cores[CURRENT_CORE].lock_issued = true;
    }
#endif
}

inline void unlock_cores(void)
{
#if 0
    if (cores[CURRENT_CORE].lock_issued)
    {
        cores[CURRENT_CORE].lock_issued = false;
        UNLOCK();
    }
#endif
}

#endif
/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;

static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
static inline void core_sleep(void) __attribute__((always_inline));
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static void start_thread(void (*thread_func)(void), const void* addr) __attribute__((naked,used));
static void start_thread(void (*thread_func)(void), const void* addr)
{
    /* r0 = thread_func, r1 = addr */
#if NUM_CORES > 1 && CONFIG_CPU != PP5002
    asm volatile (
        "mov    r2, #0          \n"
        "str    r2, [r1, #40]   \n"
        "ldr    r1, =0xf000f044 \n" /* invalidate this core's cache */
        "ldr    r2, [r1]        \n"
        "orr    r2, r2, #6      \n"
        "str    r2, [r1]        \n"
        "ldr    r1, =0x6000c000 \n"
    "1:                         \n"
        "ldr    r2, [r1]        \n"
        "tst    r2, #0x8000     \n"
        "bne    1b              \n"
        "mov    pc, r0          \n"
        : : : "r1", "r2"
    );
#else
    asm volatile (
        "mov    r2, #0          \n"
        "str    r2, [r1, #40]   \n"
        "mov    pc, r0          \n"
        : : : "r1", "r2"
    );
#endif
    (void)thread_func;
    (void)addr;
}
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "cmp    r0, #0                 \n" /* check for NULL */
        "movne  r1, %0                 \n" /* if not already running, jump to start */
        "ldrne  pc, =start_thread      \n"
        : : "r" (addr) : "r0", "r1"
    );
}
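
/* Layout note, derived from the asm above: the ARM context block holds ten
 * 32-bit registers (r4-r11, sp, lr) followed by the thread start address at
 * byte offset 40. load_context() branches into start_thread() whenever that
 * field is non-NULL, and start_thread() zeroes the field before jumping to
 * the thread function, so each thread is only ever "started" once. */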
#if defined (CPU_PP)
static inline void core_sleep(void)
{
    unlock_cores();

    /* This should sleep the CPU. It appears to wake by itself on
       interrupts */
    if (CURRENT_CORE == CPU)
        CPU_CTL = PROC_SLEEP;
    else
        COP_CTL = PROC_SLEEP;

    lock_cores();
}
#elif CONFIG_CPU == S3C2440
static inline void core_sleep(void)
{
    int i;
    CLKCON |= (1 << 2); /* set IDLE bit */
    for(i=0; i<10; i++); /* wait for IDLE */
    CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
}
#else
static inline void core_sleep(void)
{
}
#endif
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   1f                            \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    "1:                                        \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

static inline void core_sleep(void)
{
    asm volatile ("stop #0x2000");
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
   since that's what'll be the most useful for most things which the dsp
   will do. Codecs should still initialize their preferred modes
   explicitly. */
#define THREAD_CPU_INIT(core, thread) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0   \n"
        "sts.l   pr, @-%0 \n"
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l   @%0+,r8  \n"
        "mov.l   @%0+,r9  \n"
        "mov.l   @%0+,r10 \n"
        "mov.l   @%0+,r11 \n"
        "mov.l   @%0+,r12 \n"
        "mov.l   @%0+,r13 \n"
        "mov.l   @%0+,r14 \n"
        "mov.l   @%0+,r15 \n"
        "lds.l   @%0+,pr  \n"
        "mov.l   @%0,r0   \n" /* Get start address */
        "tst     r0,r0    \n"
        "bt      .running \n" /* NULL -> already running */
        "lds     r0,pr    \n"
        "mov     #0,r0    \n"
        "rts              \n" /* Start the thread */
        "mov.l   r0,@%0   \n" /* Clear start address */
    ".running:            \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

static inline void core_sleep(void)
{
    and_b(0x7F, &SBYCR);
    asm volatile ("sleep");
}

#endif
#ifndef THREAD_CPU_INIT
/* No cpu specific init - make empty */
#define THREAD_CPU_INIT(core, thread)
#endif
#ifdef THREAD_EXTRA_CHECKS
static void thread_panicf_format_name(char *buffer, struct thread_entry *thread)
{
    *buffer = '\0';
    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *fmt = thread->name ? " %s" : " %08lX";
        intptr_t name = thread->name ?
            (intptr_t)thread->name : (intptr_t)thread;
        snprintf(buffer, 16, fmt, name);
    }
}

static void thread_panicf(const char *msg,
        struct thread_entry *thread1, struct thread_entry *thread2)
{
    static char thread1_name[16], thread2_name[16];
    thread_panicf_format_name(thread1_name, thread1);
    thread_panicf_format_name(thread2_name, thread2);
    panicf ("%s%s%s", msg, thread1_name, thread2_name);
}
#else
static void thread_stkov(void)
{
    /* Display thread name if one or ID if none */
    struct thread_entry *current = cores[CURRENT_CORE].running;
    const char *fmt = current->name ? "%s %s" : "%s %08lX";
    intptr_t name = current->name ?
        (intptr_t)current->name : (intptr_t)current;
    panicf(fmt, "Stkov", name);
}
#endif /* THREAD_EXTRA_CHECKS */
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
        */
    }
}
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
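
/* The running, sleeping and blocked lists above are circular doubly-linked
 * lists threaded through the next/prev pointers of each thread_entry, with
 * *list pointing at the current head (the running thread for the run queue).
 * add_to_list() inserts just before the head, which yields the round-robin
 * order that switch_thread() walks. */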
static void check_sleepers(void) __attribute__ ((noinline));
static void check_sleepers(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[core].sleeping;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[core].sleeping, current);
            add_to_list(&cores[core].running, current);
            current->statearg = 0;

            /* If there are no more processes in the list, break the loop. */
            if (cores[core].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[core].sleeping)
            break;
    }
}
/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void) __attribute__ ((noinline));
static void wake_list_awaken(void)
{
    const unsigned int core = CURRENT_CORE;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */

    struct thread_entry *waking = cores[core].waking;
    struct thread_entry *running = cores[core].running;

    if (running != NULL)
    {
        /* Place waking threads at the end of the running list. */
        struct thread_entry *tmp;
        waking->prev->next  = running;
        running->prev->next = waking;
        tmp                 = running->prev;
        running->prev       = waking->prev;
        waking->prev        = tmp;
    }
    else
    {
        /* Just transfer the list as-is - just came out of a core
         * sleep. */
        cores[core].running = waking;
    }

    /* Done with waking list */
    cores[core].waking = NULL;
    set_irq_level(oldlevel);
}
static inline void sleep_core(void)
{
    const unsigned int core = CURRENT_CORE;

    for (;;)
    {
        /* We want to do these ASAP as it may change the decision to sleep
           the core or the core has woken because an interrupt occurred
           and posted a message to a queue. */
        if (cores[core].waking != NULL)
            wake_list_awaken();

        if (cores[core].last_tick != current_tick)
        {
            if (cores[core].sleeping != NULL)
                check_sleepers();
            cores[core].last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[core].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
        core_sleep();
    }
}
#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif
static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline));
static void change_thread_state(struct thread_entry **blocked_list)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *old;
    unsigned long new_state;

    /* Remove the thread from the list of running threads. */
    old = cores[core].running;
    new_state = GET_STATE(old->statearg);

    /* Check if a thread state change has been requested. */
    if (new_state)
    {
        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[core].running, old);

        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
        else
            add_to_list(&cores[core].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == cores[core].highest_priority)
            cores[core].highest_priority = 100;
#endif
    }
    else
        /* Switch to the next running thread. */
        cores[core].running = old->next;
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
    const unsigned int core = CURRENT_CORE;

#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[core].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    lock_cores();

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[core].running->context);

        /* Check if the current thread stack is overflown */
        stackptr = cores[core].running->stack;
        if(stackptr[0] != DEADBEEF)
#ifdef THREAD_EXTRA_CHECKS
            thread_panicf("Stkov", cores[core].running, NULL);
#else
            thread_stkov();
#endif

        /* Rearrange thread lists as needed */
        change_thread_state(blocked_list);

        /* This has to be done after the scheduler is finished with the
           blocked_list pointer so that an IRQ can't kill us by attempting
           a wake but before attempting any core sleep. */
        if (cores[core].switch_to_irq_level != STAY_IRQ_LEVEL)
        {
            int level = cores[core].switch_to_irq_level;
            cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
            set_irq_level(level);
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[core].running->priority;

        if (priority < cores[core].highest_priority)
            cores[core].highest_priority = priority;

        if (priority == cores[core].highest_priority ||
            (current_tick - cores[core].running->last_run >
             priority * 8) ||
            cores[core].running->priority_x != 0)
        {
            break;
        }

        cores[core].running = cores[core].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[core].running->last_run = current_tick;
#endif

#endif
    unlock_cores();

    /* And finally give control to the next thread. */
    load_context(&cores[core].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[core].running));
#endif
}
void sleep_thread(int ticks)
{
    struct thread_entry *current;

    lock_cores();

    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);

    switch_thread(true, NULL);
}
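
/* Illustrative use (hypothetical caller; assumes HZ, the kernel tick rate,
 * is visible via kernel.h):
 *
 *     sleep_thread(HZ/10);    - yield and sleep this thread for roughly 100 ms
 *
 * The +1 in the timeout above means the thread sleeps for at least 'ticks'
 * whole ticks before check_sleepers() moves it back to the running list. */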
void block_thread(struct thread_entry **list)
{
    struct thread_entry *current;

    lock_cores();

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* We are not allowed to mix blocking types in one queue. */
    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
        thread_panicf("Blocking violation B->*T", current, *list);
#endif

    /* Set the state to blocked and ask the scheduler to switch tasks,
     * this takes us off of the run queue until we are explicitly woken */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);

    switch_thread(true, list);

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
#else
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
#endif
}
void block_thread_w_tmo(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    lock_cores();
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation, whatever we are waiting
     * for _may or may not_ happen, regardless of boost state, (user input
     * for instance), so this thread no longer needs to boost */
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* We can store only one thread in the "list" if the thread is also used
     * in another list (such as the core's list of sleeping tasks). */
    if (*list)
        thread_panicf("Blocking violation T->*B", current, NULL);
#endif

    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);

    /* Set the "list" for explicit wakeup */
    *list = current;

    /* Now force a task switch and block until we have been woken up
     * by another thread or the timeout is reached. */
    switch_thread(true, NULL);

    /* It is now safe for another thread to block on this "list" */
    *list = NULL;
}
#if !defined(SIMULATOR)
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread(list);
}

void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
}
#endif
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
    {
        return ;
    }

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. List removal
             * is safe since each object maintains its own list of
             * sleepers and queues protect against reentrancy. */
            remove_from_list(list, thread);
            add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause the scheduler to immediately
             * wake up the thread. */
            thread->statearg = 0;
            break;

        default:
            /* Nothing to do. Thread has already been woken up
             * or its state is not blocked or blocked with timeout. */
            return ;
    }
}
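
/* A blocked thread is woken by passing the same "list" pointer it blocked on
 * back to wakeup_thread(). A minimal sketch of the pairing (hypothetical
 * waiter/waker, not an API defined in this file):
 *
 *     static struct thread_entry *waiters = NULL;
 *
 *     waiter:  block_thread(&waiters);    - wait until signalled
 *     waker:   wakeup_thread(&waiters);   - release the head waiter
 *
 * Note that the STATE_BLOCKED case above deliberately falls through into
 * STATE_BLOCKED_W_TMO so the timeout field is cleared in both cases. */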
inline static int find_empty_thread_slot(void)
{
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (threads[n].name == NULL)
            return n;
    }

    return -1;
}
/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
   before calling. */
void wakeup_thread_irq_safe(struct thread_entry **list)
{
    struct core_entry *core = &cores[CURRENT_CORE];
    /* Switch wakeup lists and call wakeup_thread */
    core->wakeup_list = &core->waking;
    wakeup_thread(list);
    /* Switch back to normal running list */
    core->wakeup_list = &core->running;
}
/*---------------------------------------------------------------------------
 * Create a thread
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority)
                  IF_COP(, unsigned int core, bool fallback))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int slot;
    struct regs *regs;
    struct thread_entry *thread;

/*****
 * Ugly code alert!
 * To prevent ifdef hell while keeping the binary size down, we define
 * core here if it hasn't been passed as a parameter
 *****/
#if NUM_CORES == 1
#define core CPU
#endif

#if NUM_CORES > 1
    /* If the kernel hasn't initialised on the COP (most likely due to an old
     * bootloader) then refuse to start threads on the COP
     */
    if ((core == COP) && !cores[core].kernel_running)
    {
        if (fallback)
            return create_thread(function, stack, stack_size, name
                                 IF_PRIO(, priority) IF_COP(, CPU, false));
        else
            return NULL;
    }
#endif

    lock_cores();

    slot = find_empty_thread_slot();
    if (slot < 0)
    {
        unlock_cores();
        return NULL;
    }

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &threads[slot];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = 0;
    thread->priority = priority;
    cores[core].highest_priority = 100;
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        flush_icache();
    }
#endif

    regs = &thread->context;
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
    regs->start = (void*)function;

    /* Do any CPU specific inits after initializing common items
       to have access to valid data */
    THREAD_CPU_INIT(core, thread);

    add_to_list(&cores[core].running, thread);
    unlock_cores();

    return thread;
#if NUM_CORES == 1
#undef core
#endif
}
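
/* Illustrative call (hypothetical caller on a single-core build where the
 * IF_COP() arguments compile away; thread name, stack and priority are
 * examples only):
 *
 *     static long demo_stack[0x400/sizeof(long)];
 *
 *     struct thread_entry *t =
 *         create_thread(demo_thread, demo_stack, sizeof(demo_stack),
 *                       "demo" IF_PRIO(, PRIORITY_USER_INTERFACE));
 *
 * The returned pointer is NULL when no thread slot is free. */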
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    lock_cores();

    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
    {
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
        {
            cpu_boost(true);
        }
        boosted_threads++;
    }

    unlock_cores();
}
#endif
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    lock_cores();

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[IF_COP2(thread->core)].highest_priority = 100;
#endif

    if (thread == cores[IF_COP2(thread->core)].running)
    {
        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[IF_COP2(thread->core)].sleeping)
        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
    else
        remove_from_list(NULL, thread);

    unlock_cores();
}
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    lock_cores();
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    cores[IF_COP2(thread->core)].highest_priority = 100;
    unlock_cores();

    return old_priority;
}

int thread_get_priority(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->priority;
}

void priority_yield(void)
{
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;
    switch_thread(true, NULL);
    thread->priority_x = 0;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    int slot;

    /* Let main CPU initialize first. */
#if NUM_CORES > 1
    if (core != CPU)
    {
        while (!cores[CPU].kernel_running) ;
    }
#endif

    lock_cores();
    slot = find_empty_thread_slot();

    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].waking = NULL;
    cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif
    threads[slot].name = main_thread_name;
    threads[slot].statearg = 0;
    threads[slot].context.start = 0; /* core's main thread already running */
#if NUM_CORES > 1
    threads[slot].core = core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    threads[slot].priority = PRIORITY_USER_INTERFACE;
    threads[slot].priority_x = 0;
    cores[core].highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    boosted_threads = 0;
#endif
    add_to_list(&cores[core].running, &threads[slot]);

    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
    if (core == CPU)
    {
        threads[slot].stack = stackbegin;
        threads[slot].stack_size = (int)stackend - (int)stackbegin;
    }
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
    else
    {
        threads[slot].stack = cop_stackbegin;
        threads[slot].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
    }

    cores[core].kernel_running = true;
#endif

    unlock_cores();
}
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}
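
/* Worked example of the calculation above: for an 8 KB stack where only the
 * top 2 KB have ever been written (the stack grows downward from sp, so the
 * bottom 6 KB still read DEADBEEF), the loop stops at i = 1536 words and the
 * function returns (8192 - 1536*4) * 100 / 8192 = 25, i.e. 25% peak usage. */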
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}