1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Ulf Ralberg
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
/* Single-core build: IF_COP2() ignores its argument and always resolves to
 * the current (only) core index. */
# define IF_COP2(x) CURRENT_CORE
/* Sentinel pattern written over unused stack words so stack overflows can be
 * detected later (see create_thread / thread_stack_usage). */
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
/* Per-core scheduler state (running/sleeping/waking lists etc.), kept in
 * fast IBSS memory. */
struct core_entry cores[NUM_CORES] IBSS_ATTR;
/* Static pool of all thread control blocks; a slot is free when its .name
 * is NULL (see find_empty_thread_slot). */
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/* Count of threads currently requesting a CPU boost.
 * NOTE(review): the matching #endif is elided from this excerpt. */
static int boosted_threads IBSS_ATTR;
/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS
/* Name assigned to each core's initial ("main") thread in init_threads. */
static const char main_thread_name[] = "main";
/* Main stack bounds, provided externally (linker/startup code). */
extern int stackbegin[];
extern int stackend[];
extern int cop_stackbegin[];
extern int cop_stackend[];
/* NOTE(review): the extern declarations above and the dummy definitions below
 * conflict as written, so they presumably sit in mutually exclusive
 * preprocessor branches whose #if lines are elided from this excerpt. */
/* The coprocessor stack is not set up in the bootloader code, but the threading
 * is. No threads are run on the coprocessor, so set up some dummy stack */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
/* Lock word backing the LOCK/UNLOCK macros below. */
static long cores_locked IBSS_ATTR;
/* Spin until the lock word is acquired via atomic test_and_set. */
#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
/* Release is a plain store; no barrier is issued here. */
#define UNLOCK(...) cores_locked = 0
/* #warning "Core locking mechanism should be fixed on H10/4G!" */
/* Acquire the inter-core lock at most once per core: lock_issued records
 * that this core already holds it. NOTE(review): the actual LOCK() call and
 * braces are elided from this excerpt — only the flag handling is visible. */
inline void lock_cores(void)
    if (!cores[CURRENT_CORE].lock_issued)
        cores[CURRENT_CORE].lock_issued = true;
/* Release the inter-core lock if this core holds it (lock_issued set).
 * NOTE(review): the actual UNLOCK() call and braces are elided from this
 * excerpt — only the flag handling is visible. */
inline void unlock_cores(void)
    if (cores[CURRENT_CORE].lock_issued)
        cores[CURRENT_CORE].lock_issued = false;
/* List primitives run on every scheduler pass, so they are placed in IRAM
 * (ICODE_ATTR). */
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
/* Forward declaration of the scheduler entry point.
 * NOTE(review): its trailing attribute/semicolon line is elided here. */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
/* Context save/restore are forced inline so they execute in the caller's
 * stack frame — they manipulate sp/lr directly. */
static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
static inline void core_sleep(void) __attribute__((always_inline));
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
/* ARM: bulk-store the callee-saved registers r4-r11 plus sp and lr into the
 * context block at addr. NOTE(review): the asm volatile wrapper and closing
 * lines are elided from this excerpt. */
static inline void store_context(void* addr)
        "stmia %0, { r4-r11, sp, lr }\n"
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
/* Trampoline entered the first time a thread runs. Declared naked: no
 * compiler prologue; per the comment below it is reached with r0 holding the
 * thread function and r1 the context address. */
static void start_thread(void (*thread_func)(void), const void* addr) __attribute__((naked));
static void start_thread(void (*thread_func)(void), const void* addr)
    /* r0 = thread_func, r1 = addr */
#if NUM_CORES > 1 && CONFIG_CPU != PP5002
        /* Store r2 into the context's offset-40 slot — presumably clearing
         * the start pointer that load_context tests (TODO confirm; the
         * instruction that sets r2 is elided from this excerpt). */
        "str r2, [r1, #40] \n"
        "ldr r1, =0xf000f044 \n" /* invalidate this core's cache */
        "ldr r1, =0x6000c000 \n"
        "str r2, [r1, #40] \n"
/* ARM: restore callee-saved registers from the context block; if the saved
 * start pointer (offset 40) is non-NULL the thread has never run, so control
 * is redirected into start_thread instead of resuming. */
static inline void load_context(const void* addr)
        "ldmia %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr r0, [%0, #40] \n" /* load start pointer */
        "cmp r0, #0 \n" /* check for NULL */
        "movne r1, %0 \n" /* if not already running, jump to start */
        "ldrne pc, =start_thread \n"
        : : "r" (addr) : "r0", "r1"
/* PortalPlayer: put the current core (CPU or COP) into its sleep state via
 * the processor-control register; it wakes on interrupt. NOTE(review): the
 * else branch and surrounding braces are elided from this excerpt. */
static inline void core_sleep(void)
    /* This should sleep the CPU. It appears to wake by itself on
    if (CURRENT_CORE == CPU)
        CPU_CTL = PROC_SLEEP;
        COP_CTL = PROC_SLEEP;
#elif CONFIG_CPU == S3C2440
/* S3C2440: request IDLE mode via CLKCON bit 2, busy-wait briefly for it to
 * take effect, then clear the bit again after wakeup. */
static inline void core_sleep(void)
    CLKCON |= (1 << 2); /* set IDLE bit */
    for(i=0; i<10; i++); /* wait for IDLE */
    CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
/* Coldfire: save MACSR (via d0) plus the callee-saved data/address registers
 * into the context block. NOTE(review): asm wrapper lines elided here. */
static inline void store_context(void* addr)
        "move.l %%macsr,%%d0 \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
/* Coldfire: restore registers and MACSR; if the start address at offset 52
 * is non-NULL the thread has not run yet, so clear it and jump there. */
static inline void load_context(const void* addr)
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l %%d0,%%macsr \n"
        "move.l (52,%0),%%d0 \n" /* Get start address */
        "beq.b 1f \n" /* NULL -> already running */
        "clr.l (52,%0) \n" /* Clear start address.. */
        "jmp (%0) \n" /* ..and start the thread */
        : : "a" (addr) : "d0" /* only! */
/* Coldfire: STOP with interrupt level 0x2000 — halts until an interrupt. */
static inline void core_sleep(void)
    asm volatile ("stop #0x2000");
/* Set EMAC unit to fractional mode with saturation for each new thread,
   since that's what'll be the most useful for most things which the dsp
   will do. Codecs should still initialize their preferred modes
#define THREAD_CPU_INIT(core, thread) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
/* SH7034 register save. NOTE(review): the entire asm body is elided from
 * this excerpt. */
static inline void store_context(void* addr)
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
/* SH7034: restore registers, then test the saved start address — if NULL the
 * thread is already running, otherwise clear it (in the rts delay slot) and
 * start the thread. NOTE(review): several asm lines are elided here. */
static inline void load_context(const void* addr)
        "mov.l @%0,r0 \n" /* Get start address */
        "bt .running \n" /* NULL -> already running */
        "rts \n" /* Start the thread */
        "mov.l r0,@%0 \n" /* Clear start address */
        : : "r" (addr) : "r0" /* only! */
/* SH7034: single SLEEP instruction; wakes on interrupt. */
static inline void core_sleep(void)
    asm volatile ("sleep");
#ifndef THREAD_CPU_INIT
/* No cpu specific init - make empty */
#define THREAD_CPU_INIT(core, thread)
#ifdef THREAD_EXTRA_CHECKS
/* Format a short (16-byte) description of a thread into buffer: its name if
 * it has one, otherwise its control-block address as hex. */
static void thread_panicf_format_name(char *buffer, struct thread_entry *thread)
    /* Display thread name if one or ID if none */
    const char *fmt = thread->name ? " %s" : " %08lX";
    intptr_t name = thread->name ?
        (intptr_t)thread->name : (intptr_t)thread;
    snprintf(buffer, 16, fmt, name);
/* Panic with a message followed by descriptions of up to two involved
 * threads. Buffers are static: we are about to panic, so reentrancy is not
 * a concern and stack use is kept minimal. */
static void thread_panicf(const char *msg,
    struct thread_entry *thread1, struct thread_entry *thread2)
    static char thread1_name[16], thread2_name[16];
    thread_panicf_format_name(thread1_name, thread1);
    thread_panicf_format_name(thread2_name, thread2);
    panicf ("%s%s%s", msg, thread1_name, thread2_name);
/* Stack-overflow panic for the currently running thread, identified by name
 * if it has one, else by its control-block address. */
static void thread_stkov(void)
    /* Display thread name if one or ID if none */
    struct thread_entry *current = cores[CURRENT_CORE].running;
    const char *fmt = current->name ? "%s %s" : "%s %08lX";
    intptr_t name = current->name ?
        (intptr_t)current->name : (intptr_t)current;
    panicf(fmt, "Stkov", name);
#endif /* THREAD_EXTRA_CHECKS */
/* Insert a thread into a circular doubly-linked list.
 * Three cases are visible below; the branch lines selecting between them
 * (empty list vs. insertion before/after the head) are elided from this
 * excerpt. */
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
        /* Empty list: thread becomes a one-element ring linked to itself. */
        thread->next = thread;
        thread->prev = thread;
        /* Insert before the current head (i.e. at the tail of the ring). */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;
        /* Insert directly after the current head. */
        thread->next = (*list)->next;
        thread->prev = *list;
        thread->next->prev = thread;
        (*list)->next = thread;
/* Unlink a thread from its circular doubly-linked list.
 * NOTE(review): branch bodies (sole-element handling, head advance) are
 * partially elided from this excerpt. */
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
    /* A thread linked to itself is the list's only element. */
    if (thread == thread->next)
        /* Advance the head past the thread being removed. */
        *list = thread->next;
    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
/* Walk the current core's sleeping list and move every thread whose sleep
 * timeout (stored in statearg) has expired back onto the running list.
 * NOTE(review): the loop header and braces are elided from this excerpt. */
void check_sleepers(void)
    struct thread_entry *current, *next;
    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
        /* Grab next before unlinking current from the sleeping ring. */
        next = current->next;
        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);
            current->statearg = 0;
            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void)
    /* IRQs must be off while the waking list is spliced into running. */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */
    struct thread_entry *waking = cores[CURRENT_CORE].waking;
    struct thread_entry *running = cores[CURRENT_CORE].running;
        /* Place waking threads at the end of the running list. */
        /* Splice the two circular lists together; NOTE(review): part of the
         * pointer swap using tmp is elided from this excerpt. */
        struct thread_entry *tmp;
        waking->prev->next = running;
        running->prev->next = waking;
        running->prev = waking->prev;
        /* Just transfer the list as-is - just came out of a core
        cores[CURRENT_CORE].running = waking;
    /* Done with waking list */
    cores[CURRENT_CORE].waking = NULL;
    set_irq_level(oldlevel);
/* Loop until this core has at least one runnable thread: absorb any
 * IRQ-woken threads, run sleep-timeout checks once per tick, and otherwise
 * put the core into its low-power sleep state.
 * NOTE(review): loop construct, braces and the calls made inside the ifs
 * (e.g. wake_list_awaken / check_sleepers / core_sleep) are elided from this
 * excerpt. */
static inline void sleep_core(void)
        /* We want to do these ASAP as it may change the decision to sleep
           the core or the core has woken because an interrupt occurred
           and posted a message to a queue. */
        if (cores[CURRENT_CORE].waking != NULL)
        /* Only rescan the sleepers once per tick. */
        if (cores[CURRENT_CORE].last_tick != current_tick)
            cores[CURRENT_CORE].last_tick = current_tick;
        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
        /* Enter sleep mode to reduce power usage, woken up on interrupt */
/* Map a thread control block back to its index in the global threads[]
 * array by linear search. NOTE(review): the return statements are elided
 * from this excerpt. */
static int get_threadnum(struct thread_entry *thread)
    for (i = 0; i < MAXTHREADS; i++)
        if (&threads[i] == thread)
/* Profiling hook: start profiling attributed to the currently running
 * thread's slot number. */
void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
/* If the running thread has requested a state change (recorded in its
 * statearg), move it off the running list onto either the caller-supplied
 * blocked list or this core's sleeping list, then advance the running head.
 * NOTE(review): the condition guarding the requested-change branch and
 * several braces are elided from this excerpt. */
void change_thread_state(struct thread_entry **blocked_list)
    struct thread_entry *old;
    unsigned long new_state;
    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    new_state = GET_STATE(old->statearg);
    /* Check if a thread state change has been requested. */
        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[CURRENT_CORE].running, old);
        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
            add_to_list(&cores[CURRENT_CORE].sleeping, old);
#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == cores[CURRENT_CORE].highest_priority)
            cores[CURRENT_CORE].highest_priority = 100;
    /* Switch to the next running thread. */
    cores[CURRENT_CORE].running = old->next;
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
/* Scheduler core: optionally save the outgoing thread's context, verify its
 * stack canary, apply any pending state change, honour a deferred IRQ-level
 * change, pick the next runnable thread (priority-aware when enabled) and
 * load its context. NOTE(review): many structural lines (braces, #else/#endif,
 * the save_context test, calls such as sleep_core) are elided from this
 * excerpt. */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
    unsigned int *stackptr;
    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
        store_context(&cores[CURRENT_CORE].running->context);
        /* Check if the current thread stack is overflown */
        /* The bottom stack word still holds DEADBEEF unless the stack has
         * grown past it. */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
#ifdef THREAD_EXTRA_CHECKS
            thread_panicf("Stkov", cores[CURRENT_CORE].running, NULL);
        /* Rearrange thread lists as needed */
        change_thread_state(blocked_list);
        /* This has to be done after the scheduler is finished with the
           blocked_list pointer so that an IRQ can't kill us by attempting
           a wake but before attempting any core sleep. */
        if (cores[CURRENT_CORE].switch_to_irq_level != STAY_IRQ_LEVEL)
            int level = cores[CURRENT_CORE].switch_to_irq_level;
            cores[CURRENT_CORE].switch_to_irq_level = STAY_IRQ_LEVEL;
            set_irq_level(level);
    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts core into sleep state until
     * there is at least one running process again. */
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
        int priority = cores[CURRENT_CORE].running->priority;
        /* Track the best (numerically lowest) priority seen on this core. */
        if (priority < cores[CURRENT_CORE].highest_priority)
            cores[CURRENT_CORE].highest_priority = priority;
        /* Skip this thread (advance the head) when it is at the top
         * priority, has been starved, or has yielded via priority_x.
         * NOTE(review): part of the starvation expression is elided. */
        if (priority == cores[CURRENT_CORE].highest_priority ||
            (current_tick - cores[CURRENT_CORE].running->last_run >
             cores[CURRENT_CORE].running->priority_x != 0)
            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    /* Reset the value of thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
/* Put the calling thread to sleep for (ticks + 1) ticks: drop any CPU boost
 * it holds, record the wakeup deadline in statearg and hand over to the
 * scheduler, which moves it to the sleeping list. */
void sleep_thread(int ticks)
    struct thread_entry *current;
    current = cores[CURRENT_CORE].running;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Sleeping threads must not keep the CPU boosted; the unboost call made
     * when the count hits zero is elided from this excerpt. */
    if (STATE_IS_BOOSTED(current->statearg))
        if (!boosted_threads)
    /* Set the thread's new state and timeout and finally force a task switch
     * so that scheduler removes thread from the list of running processes
     * and puts it in list of sleeping tasks. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);
    switch_thread(true, NULL);
/* Block the calling thread indefinitely on the given wait list until another
 * thread wakes it via wakeup_thread. The boosted flag (if any) survives the
 * block. NOTE(review): braces and #else/#endif lines are elided from this
 * excerpt. */
void block_thread(struct thread_entry **list)
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#ifdef THREAD_EXTRA_CHECKS
    /* We are not allowed to mix blocking types in one queue. */
    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
        thread_panicf("Blocking violation B->*T", current, *list);
    /* Set the state to blocked and ask the scheduler to switch tasks,
     * this takes us off of the run queue until we are explicitly woken */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);
    switch_thread(true, list);
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
/* Block the calling thread on the given list with a timeout (in ticks);
 * it resumes either when woken or when the timeout expires. Unlike
 * block_thread, any CPU boost is dropped first. NOTE(review): several lines
 * (braces, the list assignment for explicit wakeup, the unboost call) are
 * elided from this excerpt. */
void block_thread_w_tmo(struct thread_entry **list, int timeout)
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation, whatever we are waiting
     * for _may or may not_ happen, regardless of boost state, (user input
     * for instance), so this thread no longer needs to boost */
    if (STATE_IS_BOOSTED(current->statearg))
        if (!boosted_threads)
#ifdef THREAD_EXTRA_CHECKS
    /* We can store only one thread to the "list" if thread is used
     * in other list (such as core's list for sleeping tasks). */
        thread_panicf("Blocking violation T->*B", current, NULL);
    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);
    /* Set the "list" for explicit wakeup */
    /* Now force a task switch and block until we have been woken up
     * by another thread or timeout is reached. */
    switch_thread(true, NULL);
    /* It is now safe for another thread to block on this "list" */
#if !defined(SIMULATOR)
/* Block on a list while arranging for the scheduler to restore the given
 * IRQ level once it is done with the blocked_list pointer (see
 * switch_thread). NOTE(review): the block_thread(list) call is elided from
 * this excerpt. */
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
    cores[CURRENT_CORE].switch_to_irq_level = level;
/* Same as above but with a timeout, delegating to block_thread_w_tmo. */
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
/* Wake one thread blocked on the given list: an indefinitely blocked thread
 * is moved to its core's wakeup list; a timeout-blocked thread merely has
 * its timeout cleared so the scheduler wakes it on its next pass.
 * NOTE(review): the case labels/braces and the assignment of `thread` from
 * the list are elided from this excerpt. */
void wakeup_thread(struct thread_entry **list)
    struct thread_entry *thread;
    /* Check if there is a blocked thread at all. */
    /* Wake up the last thread first. */
    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
        /* Remove thread from the list of blocked threads and add it
         * to the scheduler's list of running processes. List removal
         * is safe since each object maintains its own list of
         * sleepers and queues protect against reentrancy. */
        remove_from_list(list, thread);
        add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);
        case STATE_BLOCKED_W_TMO:
        /* Just remove the timeout to cause scheduler to immediately
         * wake up the thread. */
        thread->statearg = 0;
        /* Nothing to do. Thread has already been woken up
         * or its state is not blocked or blocked with timeout. */
/* Find the first unused slot in threads[] — free slots have a NULL name.
 * NOTE(review): the return statements are elided from this excerpt. */
inline static int find_empty_thread_slot(void)
    for (n = 0; n < MAXTHREADS; n++)
        if (threads[n].name == NULL)
/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
/* Temporarily redirect this core's wakeup_list to its waking list so the
 * woken thread is parked there (and spliced into running later by
 * wake_list_awaken). NOTE(review): the wakeup_thread(list) call between the
 * two assignments is elided from this excerpt. */
void wakeup_thread_irq_safe(struct thread_entry **list)
    struct core_entry *core = &cores[CURRENT_CORE];
    /* Switch wakeup lists and call wakeup_thread */
    core->wakeup_list = &core->waking;
    /* Switch back to normal running list */
    core->wakeup_list = &core->running;
/*---------------------------------------------------------------------------
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
/* Create a new thread: fill its stack with the DEADBEEF canary, claim a free
 * slot in threads[], initialise its context (sp aligned down to 4 bytes,
 * start = entry function) and add it to the target core's running list.
 * NOTE(review): numerous lines (return type, braces, declarations of slot/i/
 * regs, name/core assignments, return statements) are elided from this
 * excerpt. */
create_thread(void (*function)(void), void* stack, int stack_size,
              const char *name IF_PRIO(, int priority)
              IF_COP(, unsigned int core, bool fallback))
    unsigned int stacklen;
    unsigned int *stackptr;
    struct thread_entry *thread;
     * To prevent ifdef hell while keeping the binary size down, we define
     * core here if it hasn't been passed as a parameter
    /* If the kernel hasn't initialised on the COP (most likely due to an old
     * bootloader) then refuse to start threads on the COP
    if ((core == COP) && !cores[core].kernel_running)
        /* Fall back to the CPU when requested; note fallback is forced false
         * in the recursive call so this cannot recurse twice. */
        return create_thread(function, stack, stack_size, name
                             IF_PRIO(, priority) IF_COP(, CPU, false));
    slot = find_empty_thread_slot();
    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    for(i = 0;i < stacklen;i++)
        stackptr[i] = DEADBEEF;
    /* Store interesting information */
    thread = &threads[slot];
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = 0;
    thread->priority = priority;
    /* Force a rescan of the highest priority on the target core. */
    cores[core].highest_priority = 100;
    regs = &thread->context;
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
    /* Non-NULL start address marks the thread as not-yet-run for
     * load_context. */
    regs->start = (void*)function;
    /* Do any CPU specific inits after initializing common items
       to have access to valid data */
    THREAD_CPU_INIT(core, thread);
    add_to_list(&cores[core].running, thread);
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/* Mark the current thread as boosted and, when it is the first boosted
 * thread, raise the CPU frequency. NOTE(review): the cpu_boost call and the
 * boosted_threads increment are elided from this excerpt. */
void trigger_cpu_boost(void)
    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
/* Remove a thread from whichever list it is on. With a NULL argument the
 * calling thread removes itself and must not return, hence the
 * switch_thread(false, ...) at the end of that path. NOTE(review): the
 * NULL-argument test, name clearing and braces are elided from this
 * excerpt. */
void remove_thread(struct thread_entry *thread)
        thread = cores[CURRENT_CORE].running;
    /* Free the entry by removing thread name. */
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Force a priority rescan on the thread's core. */
    cores[IF_COP2(thread->core)].highest_priority = 100;
    if (thread == cores[IF_COP2(thread->core)].running)
        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
        /* Removing ourselves: switch away without saving context. */
        switch_thread(false, NULL);
    if (thread == cores[IF_COP2(thread->core)].sleeping)
        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
        remove_from_list(NULL, thread);
#ifdef HAVE_PRIORITY_SCHEDULING
/* Change a thread's priority (NULL selects the calling thread, per the
 * elided guard) and force a rescan of the core's highest priority.
 * Returns the previous priority. NOTE(review): the NULL test, declaration of
 * old_priority and the return statement are elided from this excerpt. */
int thread_set_priority(struct thread_entry *thread, int priority)
        thread = cores[CURRENT_CORE].running;
    old_priority = thread->priority;
    thread->priority = priority;
    cores[IF_COP2(thread->core)].highest_priority = 100;
/* Return a thread's priority; presumably NULL selects the calling thread
 * (the guarding condition is elided from this excerpt). */
int thread_get_priority(struct thread_entry *thread)
        thread = cores[CURRENT_CORE].running;
    return thread->priority;
/* Yield once past the priority check: priority_x = 1 makes switch_thread
 * skip this thread regardless of priority; it is cleared on resume. */
void priority_yield(void)
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;
    switch_thread(true, NULL);
    thread->priority_x = 0;
#endif /* HAVE_PRIORITY_SCHEDULING */
/* Return the thread currently running on this core. */
struct thread_entry * thread_get_current(void)
    return cores[CURRENT_CORE].running;
/* Per-core scheduler bootstrap: reset this core's lists, claim a slot for
 * the already-running "main" thread, record its stack bounds (main stack on
 * CPU, coprocessor stack on COP) and finally mark the kernel as running on
 * this core. NOTE(review): braces, several #if/#else lines and the CPU/COP
 * branch structure are elided from this excerpt. */
void init_threads(void)
    unsigned int core = CURRENT_CORE;
    /* Let main CPU initialize first. */
        while (!cores[CPU].kernel_running) ;
    slot = find_empty_thread_slot();
    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].waking = NULL;
    /* Normal wakeups go straight onto the running list (see
     * wakeup_thread_irq_safe for the temporary redirect). */
    cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
    threads[slot].name = main_thread_name;
    threads[slot].statearg = 0;
    threads[slot].context.start = 0; /* core's main thread already running */
    threads[slot].core = core;
#ifdef HAVE_PRIORITY_SCHEDULING
    threads[slot].priority = PRIORITY_USER_INTERFACE;
    threads[slot].priority_x = 0;
    cores[core].highest_priority = 100;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    boosted_threads = 0;
    add_to_list(&cores[core].running, &threads[slot]);
    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
        threads[slot].stack = stackbegin;
        threads[slot].stack_size = (int)stackend - (int)stackbegin;
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        threads[slot].stack = cop_stackbegin;
        threads[slot].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
    cores[core].kernel_running = true;
/* Estimate a thread's peak stack usage as a percentage: scan upward from the
 * stack base for the first word no longer holding the DEADBEEF canary.
 * NOTE(review): loop braces, the break, the divisor of the final expression
 * and the declaration of i are elided from this excerpt. */
int thread_stack_usage(const struct thread_entry *thread)
    unsigned int *stackptr = thread->stack;
    for (i = 0;i < thread->stack_size/sizeof(int);i++)
        if (stackptr[i] != DEADBEEF)
    return ((thread->stack_size - i * sizeof(int)) * 100) /
/* Return the thread's current scheduler state, decoded from its statearg. */
int thread_get_status(const struct thread_entry *thread)
    return GET_STATE(thread->statearg);