Do the clamping a little better.
[kugel-rb.git] / firmware / thread.c
blob 9d9e0a648eff40344f3f3568339087c5bf80995d
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif

#if NUM_CORES > 1
# define IF_COP2(x) x
#else
# define IF_COP2(x) CURRENT_CORE
#endif

#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
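/* Illustrative note (editor's sketch, not in the original source): every
 * thread stack is pre-filled with DEADBEEF in create_thread(). The scheduler
 * later checks stackptr[0] in switch_thread() and panics on overflow, and
 * thread_stack_usage() counts how many fill words were overwritten, roughly
 *
 *     used % = (stack_size - untouched_words * sizeof(int)) * 100
 *              / stack_size
 *
 * so a freshly created thread reports a usage close to 0%.
 */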
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif

/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];
#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the threading
 * is. No threads are run on the coprocessor, so set up some dummy stack */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif
#if NUM_CORES > 1
#if 0
static long cores_locked IBSS_ATTR;

#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
#define UNLOCK(...) cores_locked = 0
#endif

#warning "Core locking mechanism should be fixed on H10/4G!"

inline void lock_cores(void)
{
#if 0
    if (!cores[CURRENT_CORE].lock_issued)
    {
        LOCK();
        cores[CURRENT_CORE].lock_issued = true;
    }
#endif
}

inline void unlock_cores(void)
{
#if 0
    if (cores[CURRENT_CORE].lock_issued)
    {
        cores[CURRENT_CORE].lock_issued = false;
        UNLOCK();
    }
#endif
}

#endif
/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;

static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static void start_thread(void (*thread_func)(void), const void* addr) __attribute__((naked));
static void start_thread(void (*thread_func)(void), const void* addr)
{
    /* r0 = thread_func, r1 = addr */
#if NUM_CORES > 1 && CONFIG_CPU != PP5002
    asm volatile (
        "mov    r2, #0          \n"
        "str    r2, [r1, #40]   \n"
        "ldr    r1, =0xf000f044 \n" /* invalidate this core's cache */
        "ldr    r2, [r1]        \n"
        "orr    r2, r2, #6      \n"
        "str    r2, [r1]        \n"
        "ldr    r1, =0x6000c000 \n"
    "1:                         \n"
        "ldr    r2, [r1]        \n"
        "tst    r2, #0x8000     \n"
        "bne    1b              \n"
        "mov    pc, r0          \n"
        : : : "r1", "r2"
    );
#else
    asm volatile (
        "mov    r2, #0          \n"
        "str    r2, [r1, #40]   \n"
        "mov    pc, r0          \n"
        : : : "r1", "r2"
    );
#endif
    (void)thread_func;
    (void)addr;
    (void)start_thread;
}

static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "cmp    r0, #0                 \n" /* check for NULL */
        "movne  r1, %0                 \n" /* if not already running, jump to start */
        "ldrne  pc, =start_thread      \n"
        : : "r" (addr) : "r0", "r1"
    );
}
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   1f                            \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    "1:                                        \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
   since that's what'll be the most useful for most things which the dsp
   will do. Codecs should still initialize their preferred modes
   explicitly. */
#define THREAD_CPU_INIT(core, thread) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0    \n"
        "sts.l   pr, @-%0  \n"
        "mov.l   r15,@-%0  \n"
        "mov.l   r14,@-%0  \n"
        "mov.l   r13,@-%0  \n"
        "mov.l   r12,@-%0  \n"
        "mov.l   r11,@-%0  \n"
        "mov.l   r10,@-%0  \n"
        "mov.l   r9, @-%0  \n"
        "mov.l   r8, @-%0  \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l   @%0+,r8   \n"
        "mov.l   @%0+,r9   \n"
        "mov.l   @%0+,r10  \n"
        "mov.l   @%0+,r11  \n"
        "mov.l   @%0+,r12  \n"
        "mov.l   @%0+,r13  \n"
        "mov.l   @%0+,r14  \n"
        "mov.l   @%0+,r15  \n"
        "lds.l   @%0+,pr   \n"
        "mov.l   @%0,r0    \n" /* Get start address */
        "tst     r0,r0     \n"
        "bt      .running  \n" /* NULL -> already running */
        "lds     r0,pr     \n"
        "mov     #0,r0     \n"
        "rts               \n" /* Start the thread */
        "mov.l   r0,@%0    \n" /* Clear start address */
    ".running:             \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

#endif
#ifndef THREAD_CPU_INIT
/* No cpu specific init - make empty */
#define THREAD_CPU_INIT(core, thread)
#endif

static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
        */
    }
}
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
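/* Editor's sketch (not in the original source): the running/sleeping/blocked
 * lists are circular doubly-linked lists threaded through the entries
 * themselves, so after
 *
 *     struct thread_entry *head = NULL;
 *     add_to_list(&head, &a);
 *     add_to_list(&head, &b);        (hypothetical entries a and b)
 *
 * head == &a, a.next == &b, b.next == &a, and the prev pointers mirror that.
 * remove_from_list(NULL, &a) is used when the caller does not know, or does
 * not care, which list head the entry is currently linked under.
 */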
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    if (current == NULL)
        return ;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);
            current->statearg = 0;

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}
/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */

    struct thread_entry *waking = cores[CURRENT_CORE].waking;
    struct thread_entry *running = cores[CURRENT_CORE].running;

    if (running != NULL)
    {
        /* Place waking threads at the end of the running list. */
        struct thread_entry *tmp;
        waking->prev->next  = running;
        running->prev->next = waking;
        tmp                 = running->prev;
        running->prev       = waking->prev;
        waking->prev        = tmp;
    }
    else
    {
        /* Just transfer the list as-is - just came out of a core
         * sleep. */
        cores[CURRENT_CORE].running = waking;
    }

    /* Done with waking list */
    cores[CURRENT_CORE].waking = NULL;
    set_irq_level(oldlevel);
}
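/* Editor's note (sketch, not part of the original file): the pointer swaps
 * above splice the circular "waking" ring into the "running" ring in O(1).
 * With running = (R1 R2) and waking = (W1 W2), the result is the single ring
 * (R1 R2 W1 W2): each ring's tail is re-pointed at the other ring's head,
 * and the two head->prev pointers are exchanged.
 */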
static inline void sleep_core(void)
{
#if CONFIG_CPU == S3C2440
    int i;
#endif

    for (;;)
    {
        /* We want to do these ASAP as it may change the decision to sleep
           the core or the core has woken because an interrupt occurred
           and posted a message to a queue. */
        if (cores[CURRENT_CORE].waking != NULL)
            wake_list_awaken();

        if (cores[CURRENT_CORE].last_tick != current_tick)
        {
            check_sleepers();
            cores[CURRENT_CORE].last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
#ifdef CPU_COLDFIRE
        asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
        and_b(0x7F, &SBYCR);
        asm volatile ("sleep");
#elif defined (CPU_PP)
        unlock_cores();

        /* This should sleep the CPU. It appears to wake by itself on
           interrupts */
        if (CURRENT_CORE == CPU)
            CPU_CTL = PROC_SLEEP;
        else
            COP_CTL = PROC_SLEEP;

        lock_cores();
#elif CONFIG_CPU == S3C2440
        CLKCON |= (1 << 2); /* set IDLE bit */
        for(i=0; i<10; i++); /* wait for IDLE */
        CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
#endif
    }
}
#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;
    unsigned long new_state;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    new_state = GET_STATE(old->statearg);

    /* Check if a thread state change has been requested. */
    if (new_state)
    {
        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[CURRENT_CORE].running, old);

        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
        else
            add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == cores[CURRENT_CORE].highest_priority)
            cores[CURRENT_CORE].highest_priority = 100;
#endif
    }
    else
        /* Switch to the next running thread. */
        cores[CURRENT_CORE].running = old->next;
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    lock_cores();

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

        /* Check if the current thread stack is overflown */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
            panicf("Stkov %s", cores[CURRENT_CORE].running->name);

        /* Rearrange thread lists as needed */
        change_thread_state(blocked_list);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        /* This has to be done after the scheduler is finished with the
           blocked_list pointer so that an IRQ can't kill us by attempting
           a wake but before attempting any core sleep. */
        if (cores[CURRENT_CORE].switch_to_irq_level != STAY_IRQ_LEVEL)
        {
            int level = cores[CURRENT_CORE].switch_to_irq_level;
            cores[CURRENT_CORE].switch_to_irq_level = STAY_IRQ_LEVEL;
            set_irq_level(level);
        }
#endif
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < cores[CURRENT_CORE].highest_priority)
            cores[CURRENT_CORE].highest_priority = priority;

        if (priority == cores[CURRENT_CORE].highest_priority ||
            (current_tick - cores[CURRENT_CORE].running->last_run >
             priority * 8) ||
            cores[CURRENT_CORE].running->priority_x != 0)
        {
            break;
        }

        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
#endif

#endif
    unlock_cores();

    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
#endif
}
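/* Editor's sketch (not part of the original source): a plain cooperative
 * yield from a running thread is just a context-saving switch with no
 * blocked list, e.g.
 *
 *     switch_thread(true, NULL);
 *
 * which is how sleep_thread() and priority_yield() below hand over the CPU
 * once they have updated the current thread's statearg.
 */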
void sleep_thread(int ticks)
{
    struct thread_entry *current;

    lock_cores();

    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);

    switch_thread(true, NULL);
}
void block_thread(struct thread_entry **list)
{
    struct thread_entry *current;

    lock_cores();

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* We are not allowed to mix blocking types in one queue. */
    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
        panicf("Blocking violation B->*T");
#endif

    /* Set the state to blocked and ask the scheduler to switch tasks,
     * this takes us off of the run queue until we are explicitly woken */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);

    switch_thread(true, list);

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
#else
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
#endif
}
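/* Editor's sketch (not part of the original source): block_thread() pairs
 * with wakeup_thread() on the same list pointer. A hypothetical waiter and
 * waker (the name "waiters" is made up) would look roughly like:
 *
 *     static struct thread_entry *waiters = NULL;
 *
 *     waiting side:  block_thread(&waiters);     sleeps until woken
 *     waking side:   wakeup_thread(&waiters);    puts the thread back on
 *                                                the running/waking list
 *
 * In this codebase the real callers are the queue and mutex primitives in
 * kernel.c rather than ad-hoc lists like the hypothetical "waiters".
 */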
void block_thread_w_tmo(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    lock_cores();
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation, whatever we are waiting
     * for _may or may not_ happen, regardless of boost state, (user input
     * for instance), so this thread no longer needs to boost */
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* Only one thread can be stored in the "list", because a thread blocking
     * with a timeout is also kept on another list (the core's list of
     * sleeping tasks). */
    if (*list)
        panicf("Blocking violation T->*B");
#endif

    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);

    /* Set the "list" for explicit wakeup */
    *list = current;

    /* Now force a task switch and block until we have been woken up
     * by another thread or timeout is reached. */
    switch_thread(true, NULL);

    /* It is now safe for another thread to block on this "list" */
    *list = NULL;
}
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) && !defined(SIMULATOR)
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread(list);
}

#if 0
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
}
#endif
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
    {
        return ;
    }

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. List removal
             * is safe since each object maintains its own list of
             * sleepers and queues protect against reentrancy. */
            remove_from_list(list, thread);
            add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause scheduler to immediately
             * wake up the thread. */
            thread->statearg = 0;
            break;

        default:
            /* Nothing to do. Thread has already been woken up
             * or its state is not blocked or blocked with timeout. */
            return ;
    }
}
inline static int find_empty_thread_slot(void)
{
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (threads[n].name == NULL)
            return n;
    }

    return -1;
}
/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
   before calling. */
void wakeup_thread_irq_safe(struct thread_entry **list)
{
    struct core_entry *core = &cores[CURRENT_CORE];
    /* Switch wakeup lists and call wakeup_thread */
    core->wakeup_list = &core->waking;
    wakeup_thread(list);
    /* Switch back to normal running list */
    core->wakeup_list = &core->running;
}
/*---------------------------------------------------------------------------
 * Create a thread
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority)
                  IF_COP(, unsigned int core, bool fallback))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int slot;
    struct regs *regs;
    struct thread_entry *thread;

/*****
 * Ugly code alert!
 * To prevent ifdef hell while keeping the binary size down, we define
 * core here if it hasn't been passed as a parameter
 *****/
#if NUM_CORES == 1
#define core CPU
#endif

#if NUM_CORES > 1
    /* If the kernel hasn't initialised on the COP (most likely due to an old
     * bootloader) then refuse to start threads on the COP */
    if ((core == COP) && !cores[core].kernel_running)
    {
        if (fallback)
            return create_thread(function, stack, stack_size, name
                                 IF_PRIO(, priority) IF_COP(, CPU, false));
        else
            return NULL;
    }
#endif

    lock_cores();

    slot = find_empty_thread_slot();
    if (slot < 0)
    {
        unlock_cores();
        return NULL;
    }

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &threads[slot];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = 0;
    thread->priority = priority;
    cores[core].highest_priority = 100;
#endif

#if NUM_CORES > 1
    thread->core = core;
#endif

    regs = &thread->context;
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
    regs->start = (void*)function;

    /* Do any CPU specific inits after initializing common items
       to have access to valid data */
    THREAD_CPU_INIT(core, thread);

    add_to_list(&cores[core].running, thread);
    unlock_cores();

    return thread;
#if NUM_CORES == 1
#undef core
#endif
}
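/* Editor's sketch (not part of the original source): a typical caller
 * allocates a static stack and passes it in. The "demo_*" names below are
 * hypothetical; DEFAULT_STACK_SIZE, PRIORITY_BACKGROUND and HZ are assumed
 * to come from the usual Rockbox headers.
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE/sizeof(long)];
 *
 *     static void demo_thread(void)
 *     {
 *         for (;;)
 *             sleep_thread(HZ);
 *     }
 *
 *     create_thread(demo_thread, demo_stack, sizeof(demo_stack), "demo"
 *                   IF_PRIO(, PRIORITY_BACKGROUND)
 *                   IF_COP(, CPU, false));
 *
 * IF_PRIO()/IF_COP() expand to nothing on builds without priority
 * scheduling or a second core, so the same call compiles everywhere.
 */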
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    lock_cores();

    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
    {
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
        {
            cpu_boost(true);
        }
        boosted_threads++;
    }

    unlock_cores();
}
#endif
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    lock_cores();

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[IF_COP2(thread->core)].highest_priority = 100;
#endif

    if (thread == cores[IF_COP2(thread->core)].running)
    {
        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[IF_COP2(thread->core)].sleeping)
        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
    else
        remove_from_list(NULL, thread);

    unlock_cores();
}
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    lock_cores();
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    cores[IF_COP2(thread->core)].highest_priority = 100;
    unlock_cores();

    return old_priority;
}

int thread_get_priority(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->priority;
}

void priority_yield(void)
{
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;
    switch_thread(true, NULL);
    thread->priority_x = 0;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}
void init_threads(void)
{
    unsigned int core = CURRENT_CORE;
    int slot;

    /* Let main CPU initialize first. */
#if NUM_CORES > 1
    if (core != CPU)
    {
        while (!cores[CPU].kernel_running) ;
    }
#endif

    lock_cores();
    slot = find_empty_thread_slot();

    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].waking = NULL;
    cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif
    threads[slot].name = main_thread_name;
    threads[slot].statearg = 0;
    threads[slot].context.start = 0; /* core's main thread already running */
#if NUM_CORES > 1
    threads[slot].core = core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    threads[slot].priority = PRIORITY_USER_INTERFACE;
    threads[slot].priority_x = 0;
    cores[core].highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    boosted_threads = 0;
#endif
    add_to_list(&cores[core].running, &threads[slot]);

    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
    if (core == CPU)
    {
        threads[slot].stack = stackbegin;
        threads[slot].stack_size = (int)stackend - (int)stackbegin;
    }
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
    else
    {
        threads[slot].stack = cop_stackbegin;
        threads[slot].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
    }

    cores[core].kernel_running = true;
#endif

    unlock_cores();
}
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}
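/* Editor's note (sketch, not in the original source): i counts the words
 * still holding the DEADBEEF fill, starting from the low end of the stack,
 * so the result is the percentage of the stack that has ever been written.
 * For example, with an 8 KB stack whose lowest 6 KB are still DEADBEEF:
 *
 *     (8192 - 1536 * 4) * 100 / 8192  =  25   (percent used)
 */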
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}