/***************************************************************************
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
# define IF_COP2(x) CURRENT_CORE

#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */
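/* The DEADBEEF pattern is written over a thread's entire stack when the
 * thread is created; switch_thread() checks the bottom word of the stack to
 * detect overflow and thread_stack_usage() scans for the pattern to report
 * the stack high-water mark. */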
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif
/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";
extern int stackbegin[];
extern int stackend[];

extern int cop_stackbegin[];
extern int cop_stackend[];
/* The coprocessor stack is not set up in the bootloader code, but the
 * threading is. No threads are run on the coprocessor, so set up a dummy
 * stack for it. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
static long cores_locked IBSS_ATTR;

#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
#define UNLOCK(...) cores_locked = 0

/* #warning "Core locking mechanism should be fixed on H10/4G!" */
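/* LOCK() spins until test_and_set() reports that cores_locked was free and
 * is now taken; UNLOCK() simply clears it. The per-core lock_issued flag
 * records whether this core already holds the lock, so lock_cores() and
 * unlock_cores() only touch the shared lock when the state actually changes. */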
inline void lock_cores(void)
{
    if (!cores[CURRENT_CORE].lock_issued)
    {
        LOCK();
        cores[CURRENT_CORE].lock_issued = true;
    }
}
inline void unlock_cores(void)
{
    if (cores[CURRENT_CORE].lock_issued)
    {
        cores[CURRENT_CORE].lock_issued = false;
        UNLOCK();
    }
}
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
void switch_thread(bool save_context, struct thread_entry **blocked_list);
static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
    __attribute__ ((always_inline));
static inline void core_sleep(void) __attribute__((always_inline));
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static void start_thread(void (*thread_func)(void), const void* addr)
    __attribute__((naked,used));
static void start_thread(void (*thread_func)(void), const void* addr)
{
    /* r0 = thread_func, r1 = addr */
#if NUM_CORES > 1 && CONFIG_CPU != PP5002
    asm volatile (
        "str    r2, [r1, #40]    \n"
        "ldr    r1, =0xf000f044  \n" /* invalidate this core's cache */
        "ldr    r1, =0x6000c000  \n"
        "str    r2, [r1, #40]    \n"
static inline void load_context(const void* addr)
{
    asm volatile (
        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "cmp    r0, #0                 \n" /* check for NULL */
        "movne  r1, %0                 \n" /* if not already running, jump to start */
        "ldrne  pc, =start_thread      \n"
        : : "r" (addr) : "r0", "r1"
    );
}
static inline void core_sleep(void)
{
    /* This should sleep the CPU. It appears to wake by itself on
       interrupts. */
    if (CURRENT_CORE == CPU)
        CPU_CTL = PROC_SLEEP;
    else
        COP_CTL = PROC_SLEEP;
}
#elif CONFIG_CPU == S3C2440
static inline void core_sleep(void)
{
    int i;
    CLKCON |= (1 << 2); /* set IDLE bit */
    for(i=0; i<10; i++); /* wait for IDLE */
    CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
}
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n"  /* Load context */
        "move.l  %%d0,%%macsr  \n"
        "move.l  (52,%0),%%d0  \n"  /* Get start address */
        "beq.b   1f            \n"  /* NULL -> already running */
        "clr.l   (52,%0)       \n"  /* Clear start address.. */
        "move.l  %%d0,%0       \n"
        "jmp     (%0)          \n"  /* ..and start the thread */
    "1:                        \n"
        : : "a" (addr) : "d0"  /* only! */
    );
}
static inline void core_sleep(void)
{
    asm volatile ("stop #0x2000");
}

/* Set EMAC unit to fractional mode with saturation for each new thread,
   since that's what'll be the most useful for most things which the dsp
   will do. Codecs should still initialize their preferred modes explicitly. */
#define THREAD_CPU_INIT(core, thread) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @%0,r0   \n"  /* Get start address */
        "bt     .running \n"  /* NULL -> already running */
        "rts             \n"  /* Start the thread */
        "mov.l  r0,@%0   \n"  /* Clear start address */
        : : "r" (addr) : "r0" /* only! */
    );
}
static inline void core_sleep(void)
{
    asm volatile ("sleep");
}
#endif

#ifndef THREAD_CPU_INIT
/* No cpu specific init - make empty */
#define THREAD_CPU_INIT(core, thread)
#endif
#ifdef THREAD_EXTRA_CHECKS
static void thread_panicf_format_name(char *buffer, struct thread_entry *thread)
{
    /* Display thread name if one, or ID if none */
    const char *fmt = thread->name ? " %s" : " %08lX";
    intptr_t name = thread->name ?
        (intptr_t)thread->name : (intptr_t)thread;
    snprintf(buffer, 16, fmt, name);
}
static void thread_panicf(const char *msg,
                          struct thread_entry *thread1, struct thread_entry *thread2)
{
    static char thread1_name[16], thread2_name[16];
    thread_panicf_format_name(thread1_name, thread1);
    thread_panicf_format_name(thread2_name, thread2);

    panicf ("%s%s%s", msg, thread1_name, thread2_name);
}
#else /* !THREAD_EXTRA_CHECKS */
static void thread_stkov(void)
{
    /* Display thread name if one, or ID if none */
    struct thread_entry *current = cores[CURRENT_CORE].running;
    const char *fmt = current->name ? "%s %s" : "%s %08lX";
    intptr_t name = current->name ?
        (intptr_t)current->name : (intptr_t)current;
    panicf(fmt, "Stkov", name);
}
#endif /* THREAD_EXTRA_CHECKS */
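/* The scheduler keeps each per-core thread list (running, sleeping, waking)
 * as a circular doubly-linked list: the core_entry field points at one
 * element and every thread_entry links to its neighbours via next/prev.
 * add_to_list() below appends a thread just before the list head, i.e. at
 * the tail of the round-robin order. */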
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
{
    if (*list == NULL)
    {
        /* Insert into an empty list */
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->next = *list;
    thread->prev = (*list)->prev;
    thread->prev->next = thread;
    (*list)->prev = thread;

    /* Insert next
     thread->next = (*list)->next;
     thread->prev = *list;
     thread->next->prev = thread;
     (*list)->next = thread;
     */
}
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            /* The only item in the list - clear the list head */
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}
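/* Note: remove_thread() calls remove_from_list() with list == NULL when the
 * thread is neither the running nor the sleeping list head; in that case only
 * the neighbouring next/prev links need fixing and the head bookkeeping above
 * is skipped. */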
static void check_sleepers(void) __attribute__ ((noinline));
static void check_sleepers(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[core].sleeping;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[core].sleeping, current);
            add_to_list(&cores[core].running, current);
            current->statearg = 0;

            /* If there are no more processes in the list, break the loop. */
            if (cores[core].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[core].sleeping)
            break;
    }
}
/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void) __attribute__ ((noinline));
static void wake_list_awaken(void)
{
    const unsigned int core = CURRENT_CORE;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */

    struct thread_entry *waking = cores[core].waking;
    struct thread_entry *running = cores[core].running;

    if (running != NULL)
    {
        /* Place waking threads at the end of the running list. */
        struct thread_entry *tmp;
        waking->prev->next = running;
        running->prev->next = waking;
        tmp = running->prev;
        running->prev = waking->prev;
        waking->prev = tmp;
    }
    else
    {
        /* Just transfer the list as-is - just came out of a core
         * sleep. */
        cores[core].running = waking;
    }

    /* Done with waking list */
    cores[core].waking = NULL;
    set_irq_level(oldlevel);
}
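/* The waking list is the staging area used by wakeup_thread_irq_safe():
 * interrupt handlers add threads there instead of touching the running list
 * directly, and wake_list_awaken() splices the whole batch into the running
 * list with IRQs masked. */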
static inline void sleep_core(void)
{
    const unsigned int core = CURRENT_CORE;

    for (;;)
    {
        /* We want to do these ASAP as they may change the decision to sleep
           the core, or the core may have woken because an interrupt occurred
           and posted a message to a queue. */
        if (cores[core].waking != NULL)
            wake_list_awaken();

        if (cores[core].last_tick != current_tick)
        {
            if (cores[core].sleeping != NULL)
                check_sleepers();
            cores[core].last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[core].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
        core_sleep();
    }
}
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&threads[i] == thread)
            return i;
    }

    return -1;
}

void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
static void change_thread_state(struct thread_entry **blocked_list)
    __attribute__ ((noinline));
static void change_thread_state(struct thread_entry **blocked_list)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *old;
    unsigned long new_state;

    /* Remove the thread from the list of running threads. */
    old = cores[core].running;
    new_state = GET_STATE(old->statearg);

    /* Check if a thread state change has been requested. */
    if (new_state)
    {
        /* Change running thread state and switch to next thread. */
        remove_from_list(&cores[core].running, old);

        /* And put the thread into a new list of inactive threads. */
        if (new_state == STATE_BLOCKED)
            add_to_list(blocked_list, old);
        else
            add_to_list(&cores[core].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
        /* Reset priorities */
        if (old->priority == cores[core].highest_priority)
            cores[core].highest_priority = 100;
#endif
    }
    else
        /* Switch to the next running thread. */
        cores[core].running = old->next;
}
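/* A thread's statearg packs both its state and an argument: GET_STATE() and
 * SET_STATE() handle the state bits (STATE_BLOCKED, STATE_BLOCKED_W_TMO,
 * STATE_SLEEPING) while GET_STATE_ARG() yields the associated value, e.g.
 * the wakeup tick used by check_sleepers(). */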
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
    const unsigned int core = CURRENT_CORE;

    profile_thread_stopped(get_threadnum(cores[core].running));

    unsigned int *stackptr;
    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    store_context(&cores[core].running->context);
    /* Check if the current thread stack is overflown */
    stackptr = cores[core].running->stack;
    if(stackptr[0] != DEADBEEF)
#ifdef THREAD_EXTRA_CHECKS
        thread_panicf("Stkov", cores[core].running, NULL);
#else
        thread_stkov();
#endif
    /* Rearrange thread lists as needed */
    change_thread_state(blocked_list);
    /* This has to be done after the scheduler is finished with the
       blocked_list pointer so that an IRQ can't kill us by attempting
       a wake but before attempting any core sleep. */
    if (cores[core].switch_to_irq_level != STAY_IRQ_LEVEL)
    {
        int level = cores[core].switch_to_irq_level;
        cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
        set_irq_level(level);
    }
    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[core].running->priority;

        if (priority < cores[core].highest_priority)
            cores[core].highest_priority = priority;

        if (priority == cores[core].highest_priority ||
            (current_tick - cores[core].running->last_run >
             priority * 8) ||
            cores[core].running->priority_x != 0)
        {
            break;
        }

        cores[core].running = cores[core].running->next;
    }

    /* Reset the thread's last running time to the current time. */
    cores[core].running->last_run = current_tick;
#endif
    /* And finally give control to the next thread. */
    load_context(&cores[core].running->context);

    profile_thread_started(get_threadnum(cores[core].running));
}
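/* In short, a switch_thread() call: saves the caller's register context,
 * checks its stack guard word, lets change_thread_state() move it to the
 * blocked or sleeping list if a state change was requested, restores any
 * requested IRQ level, wakes sleepers and/or sleeps via sleep_core() until
 * something is runnable, picks the next thread (round robin, weighted by
 * priority when HAVE_PRIORITY_SCHEDULING is defined) and finally loads that
 * thread's context. */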
void sleep_thread(int ticks)
{
    struct thread_entry *current;

    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif

    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);

    switch_thread(true, NULL);
}
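/* The "+ 1" above guarantees at least `ticks` full ticks pass before
 * check_sleepers() sees current_tick reach the stored wakeup value, since
 * the tick in progress when sleep_thread() is called only counts partially. */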
void block_thread(struct thread_entry **list)
{
    struct thread_entry *current;

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Keep the boosted state over indefinite block calls, because
     * we are waiting until the earliest time that someone else
     * completes an action */
    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#endif

#ifdef THREAD_EXTRA_CHECKS
    /* We are not allowed to mix blocking types in one queue. */
    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
        thread_panicf("Blocking violation B->*T", current, *list);
#endif

    /* Set the state to blocked and ask the scheduler to switch tasks;
     * this takes us off of the run queue until we are explicitly woken */
    SET_STATE(current->statearg, STATE_BLOCKED, 0);

    switch_thread(true, list);

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Reset only the boosted flag to indicate we are up and running again. */
    current->statearg = boost_flag;
#else
    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
#endif
}
void block_thread_w_tmo(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;
    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* A block with a timeout is a sleep situation, whatever we are waiting
     * for _may or may not_ happen, regardless of boost state, (user input
     * for instance), so this thread no longer needs to boost */
    if (STATE_IS_BOOSTED(current->statearg))
    {
        boosted_threads--;
        if (!boosted_threads)
        {
            cpu_boost(false);
        }
    }
#endif
#ifdef THREAD_EXTRA_CHECKS
    /* We can store only one thread to the "list" if the thread is used
     * in another list (such as the core's list of sleeping tasks). */
    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED)
        thread_panicf("Blocking violation T->*B", current, NULL);
#endif

    /* Set the state to blocked with the specified timeout */
    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);

    /* Set the "list" for explicit wakeup */
    *list = current;

    /* Now force a task switch and block until we have been woken up
     * by another thread or timeout is reached. */
    switch_thread(true, NULL);

    /* It is now safe for another thread to block on this "list" */
    *list = NULL;
}
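/* Unlike block_thread(), a timed block stores the blocker directly in *list
 * (only one timed blocker per list) and drops any CPU boost it holds, since
 * the wait may simply time out. wakeup_thread() wakes it early by zeroing
 * the stored timeout. */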
#if !defined(SIMULATOR)
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread(list);
}

void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
}
#endif
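/* switch_to_irq_level lets a caller atomically drop to a given IRQ level and
 * block: switch_thread() restores the requested level only after
 * change_thread_state() has finished with the blocked_list pointer, so an
 * interrupt cannot wake the thread while it is still being queued. */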
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
        return;

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. List removal
             * is safe since each object maintains its own list of
             * sleepers and queues protect against reentrancy. */
            remove_from_list(list, thread);
            add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause scheduler to immediately
             * wake up the thread. */
            thread->statearg = 0;
            break;

        default:
            /* Nothing to do. Thread has already been woken up
             * or its state is not blocked or blocked with timeout. */
            break;
    }
}
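/* Note the deliberate fall-through above: a STATE_BLOCKED thread is first
 * moved onto the core's wakeup_list (the running list, or the waking list
 * when called via wakeup_thread_irq_safe()) and then has its statearg
 * cleared, exactly like a timed-out blocker. */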
inline static int find_empty_thread_slot(void)
{
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (threads[n].name == NULL)
            return n;
    }

    return -1;
}
/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
   before calling. */
void wakeup_thread_irq_safe(struct thread_entry **list)
{
    struct core_entry *core = &cores[CURRENT_CORE];
    /* Switch wakeup lists and call wakeup_thread */
    core->wakeup_list = &core->waking;
    wakeup_thread(list);
    /* Switch back to normal running list */
    core->wakeup_list = &core->running;
}
/*---------------------------------------------------------------------------
 * If using a dual core architecture, specify which core to start the thread
 * on, and whether to fall back to the other core if it can't be created.
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority)
                  IF_COP(, unsigned int core, bool fallback))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int slot;
    struct regs *regs;
    struct thread_entry *thread;
    /*
     * To prevent ifdef hell while keeping the binary size down, we define
     * core here if it hasn't been passed as a parameter.
     */
#if NUM_CORES == 1
#define core CPU
#endif

    /* If the kernel hasn't initialised on the COP (most likely due to an old
     * bootloader) then refuse to start threads on the COP. */
    if ((core == COP) && !cores[core].kernel_running)
    {
        if (fallback)
            return create_thread(function, stack, stack_size, name
                                 IF_PRIO(, priority) IF_COP(, CPU, false));
        else
            return NULL;
    }
    slot = find_empty_thread_slot();
    if (slot < 0)
        return NULL;

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }
    /* Store interesting information */
    thread = &threads[slot];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority_x = 0;
    thread->priority = priority;
    cores[core].highest_priority = 100;
#endif
    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        flush_icache();
    }
    regs = &thread->context;
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
    regs->start = (void*)function;

    /* Do any CPU specific inits after initializing common items
       to have access to valid data */
    THREAD_CPU_INIT(core, thread);

    add_to_list(&cores[core].running, thread);

    return thread;
}
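/* Illustrative call (hypothetical names): a typical caller allocates a
 * statically sized stack and passes it in, e.g.
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE/sizeof(long)];  // hypothetical
 *
 *     create_thread(demo_thread_func, demo_stack, sizeof(demo_stack),
 *                   "demo" IF_PRIO(, PRIORITY_BACKGROUND)
 *                   IF_COP(, CPU, false));
 *
 * The IF_PRIO()/IF_COP() wrappers expand to nothing on builds without
 * priority scheduling or a second core, so one call site works everywhere. */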
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
    {
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
        {
            cpu_boost(true);
        }
        boosted_threads++;
    }
}
#endif
/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[IF_COP2(thread->core)].highest_priority = 100;
#endif

    if (thread == cores[IF_COP2(thread->core)].running)
    {
        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
        switch_thread(false, NULL);
        return;
    }

    if (thread == cores[IF_COP2(thread->core)].sleeping)
        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
    else
        remove_from_list(NULL, thread);
}
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority)
{
    int old_priority;

    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    old_priority = thread->priority;
    thread->priority = priority;
    cores[IF_COP2(thread->core)].highest_priority = 100;

    return old_priority;
}
int thread_get_priority(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    return thread->priority;
}
void priority_yield(void)
{
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;
    switch_thread(true, NULL);
    thread->priority_x = 0;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
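/* priority_yield() works by briefly setting the caller's priority_x flag:
 * the selection loop in switch_thread() accepts a thread with priority_x set
 * even when it is not the highest-priority runnable thread, so a low-priority
 * caller can give others a turn without being starved of its next one. */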
struct thread_entry * thread_get_current(void)
{
    return cores[CURRENT_CORE].running;
}
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    int slot;

    /* Let main CPU initialize first. */
    if (core != CPU)
    {
        while (!cores[CPU].kernel_running) ;
    }

    slot = find_empty_thread_slot();
    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].waking = NULL;
    cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif
    threads[slot].name = main_thread_name;
    threads[slot].statearg = 0;
    threads[slot].context.start = 0; /* core's main thread already running */
    threads[slot].core = core;
#ifdef HAVE_PRIORITY_SCHEDULING
    threads[slot].priority = PRIORITY_USER_INTERFACE;
    threads[slot].priority_x = 0;
    cores[core].highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    boosted_threads = 0;
#endif

    add_to_list(&cores[core].running, &threads[slot]);
    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
    if (core == CPU)
    {
        threads[slot].stack = stackbegin;
        threads[slot].stack_size = (int)stackend - (int)stackbegin;
    }
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
    else
    {
        threads[slot].stack = cop_stackbegin;
        threads[slot].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
    }
#endif
    cores[core].kernel_running = true;
}
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0; i < thread->stack_size/sizeof(int); i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
           thread->stack_size;
}
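/* The stack grows downward from the top of the allocated block, so the first
 * word (scanning up from the bottom) that no longer holds DEADBEEF marks the
 * deepest point the stack ever reached; the formula above converts that
 * high-water mark into a percentage of the total stack size. */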
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}