/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
34 # define IF_COP2(x) CURRENT_CORE
37 #define DEADBEEF ((unsigned int)0xdeadbeef)
38 /* Cast to the the machine int type, whose size could be < 4. */
40 struct core_entry cores
[NUM_CORES
] IBSS_ATTR
;
41 struct thread_entry threads
[MAXTHREADS
] IBSS_ATTR
;
42 #ifdef HAVE_SCHEDULER_BOOSTCTRL
43 static int boosted_threads IBSS_ATTR
;
46 /* Define to enable additional checks for blocking violations etc. */
47 #define THREAD_EXTRA_CHECKS
49 static const char main_thread_name
[] = "main";
51 extern int stackbegin
[];
52 extern int stackend
[];
56 extern int cop_stackbegin
[];
57 extern int cop_stackend
[];
59 /* The coprocessor stack is not set up in the bootloader code, but the threading
60 * is. No threads are run on the coprocessor, so set up some dummy stack */
61 int *cop_stackbegin
= stackbegin
;
62 int *cop_stackend
= stackend
;
68 static long cores_locked IBSS_ATTR
;
70 #define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
71 #define UNLOCK(...) cores_locked = 0
74 #warning "Core locking mechanism should be fixed on H10/4G!"
76 inline void lock_cores(void)
79 if (!cores
[CURRENT_CORE
].lock_issued
)
82 cores
[CURRENT_CORE
].lock_issued
= true;
87 inline void unlock_cores(void)
90 if (cores
[CURRENT_CORE
].lock_issued
)
92 cores
[CURRENT_CORE
].lock_issued
= false;
101 static void add_to_list(struct thread_entry **list,
102 struct thread_entry *thread) ICODE_ATTR;
103 static void remove_from_list(struct thread_entry **list,
104 struct thread_entry *thread) ICODE_ATTR;
107 void switch_thread(bool save_context
, struct thread_entry
**blocked_list
)
110 static inline void store_context(void* addr
) __attribute__ ((always_inline
));
111 static inline void load_context(const void* addr
)
112 __attribute__ ((always_inline
));
115 /*---------------------------------------------------------------------------
116 * Store non-volatile context.
117 *---------------------------------------------------------------------------
119 static inline void store_context(void* addr
)
122 "stmia %0, { r4-r11, sp, lr }\n"
127 /*---------------------------------------------------------------------------
128 * Load non-volatile context.
129 *---------------------------------------------------------------------------
131 static void start_thread(void (*thread_func
)(void), const void* addr
) __attribute__((naked
));
132 static void start_thread(void (*thread_func
)(void), const void* addr
)
134 /* r0 = thread_func, r1 = addr */
135 #if NUM_CORES > 1 && CONFIG_CPU != PP5002
138 "str r2, [r1, #40] \n"
139 "ldr r1, =0xf000f044 \n" /* invalidate this core's cache */
143 "ldr r1, =0x6000c000 \n"
154 "str r2, [r1, #40] \n"
164 static inline void load_context(const void* addr
)
167 "ldmia %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
168 "ldr r0, [%0, #40] \n" /* load start pointer */
169 "cmp r0, #0 \n" /* check for NULL */
170 "movne r1, %0 \n" /* if not already running, jump to start */
171 "ldrne pc, =start_thread \n"
172 : : "r" (addr
) : "r0", "r1"
176 #elif defined(CPU_COLDFIRE)
177 /*---------------------------------------------------------------------------
178 * Store non-volatile context.
179 *---------------------------------------------------------------------------
181 static inline void store_context(void* addr
)
184 "move.l %%macsr,%%d0 \n"
185 "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
186 : : "a" (addr
) : "d0" /* only! */
190 /*---------------------------------------------------------------------------
191 * Load non-volatile context.
192 *---------------------------------------------------------------------------
194 static inline void load_context(const void* addr
)
197 "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
198 "move.l %%d0,%%macsr \n"
199 "move.l (52,%0),%%d0 \n" /* Get start address */
200 "beq.b 1f \n" /* NULL -> already running */
201 "clr.l (52,%0) \n" /* Clear start address.. */
203 "jmp (%0) \n" /* ..and start the thread */
205 : : "a" (addr
) : "d0" /* only! */
209 /* Set EMAC unit to fractional mode with saturation for each new thread,
210 since that's what'll be the most useful for most things which the dsp
211 will do. Codecs should still initialize their preferred modes
213 #define THREAD_CPU_INIT(core, thread) \
214 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
216 #elif CONFIG_CPU == SH7034
217 /*---------------------------------------------------------------------------
218 * Store non-volatile context.
219 *---------------------------------------------------------------------------
221 static inline void store_context(void* addr
)
238 /*---------------------------------------------------------------------------
239 * Load non-volatile context.
240 *---------------------------------------------------------------------------
242 static inline void load_context(const void* addr
)
254 "mov.l @%0,r0 \n" /* Get start address */
256 "bt .running \n" /* NULL -> already running */
259 "rts \n" /* Start the thread */
260 "mov.l r0,@%0 \n" /* Clear start address */
262 : : "r" (addr
) : "r0" /* only! */
268 #ifndef THREAD_CPU_INIT
269 /* No cpu specific init - make empty */
270 #define THREAD_CPU_INIT(core, thread)
273 static void add_to_list(struct thread_entry
**list
, struct thread_entry
*thread
)
277 thread
->next
= thread
;
278 thread
->prev
= thread
;
284 thread
->next
= *list
;
285 thread
->prev
= (*list
)->prev
;
286 thread
->prev
->next
= thread
;
287 (*list
)->prev
= thread
;
290 thread->next = (*list)->next;
291 thread->prev = *list;
292 thread->next->prev = thread;
293 (*list)->next = thread;
298 static void remove_from_list(struct thread_entry
**list
,
299 struct thread_entry
*thread
)
303 if (thread
== thread
->next
)
310 *list
= thread
->next
;
313 /* Fix links to jump over the removed entry. */
314 thread
->prev
->next
= thread
->next
;
315 thread
->next
->prev
= thread
->prev
;
318 /* Compiler trick: Don't declare as static to prevent putting
319 * function in IRAM. */
320 void check_sleepers(void)
322 struct thread_entry
*current
, *next
;
324 /* Check sleeping threads. */
325 current
= cores
[CURRENT_CORE
].sleeping
;
331 next
= current
->next
;
333 if ((unsigned)current_tick
>= GET_STATE_ARG(current
->statearg
))
335 /* Sleep timeout has been reached so bring the thread
336 * back to life again. */
337 remove_from_list(&cores
[CURRENT_CORE
].sleeping
, current
);
338 add_to_list(&cores
[CURRENT_CORE
].running
, current
);
339 current
->statearg
= 0;
341 /* If there is no more processes in the list, break the loop. */
342 if (cores
[CURRENT_CORE
].sleeping
== NULL
)
351 /* Break the loop once we have walked through the list of all
352 * sleeping processes. */
353 if (current
== cores
[CURRENT_CORE
].sleeping
)
358 /* Safely finish waking all threads potentialy woken by interrupts -
359 * statearg already zeroed in wakeup_thread. */
360 static void wake_list_awaken(void)
362 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
364 /* No need for another check in the IRQ lock since IRQs are allowed
365 only to add threads to the waking list. They won't be adding more
366 until we're done here though. */
368 struct thread_entry
*waking
= cores
[CURRENT_CORE
].waking
;
369 struct thread_entry
*running
= cores
[CURRENT_CORE
].running
;
373 /* Place waking threads at the end of the running list. */
374 struct thread_entry
*tmp
;
375 waking
->prev
->next
= running
;
376 running
->prev
->next
= waking
;
378 running
->prev
= waking
->prev
;
383 /* Just transfer the list as-is - just came out of a core
385 cores
[CURRENT_CORE
].running
= waking
;
388 /* Done with waking list */
389 cores
[CURRENT_CORE
].waking
= NULL
;
390 set_irq_level(oldlevel
);
393 static inline void sleep_core(void)
395 #if CONFIG_CPU == S3C2440
401 /* We want to do these ASAP as it may change the decision to sleep
402 the core or the core has woken because an interrupt occurred
403 and posted a message to a queue. */
404 if (cores
[CURRENT_CORE
].waking
!= NULL
)
407 if (cores
[CURRENT_CORE
].last_tick
!= current_tick
)
410 cores
[CURRENT_CORE
].last_tick
= current_tick
;
413 /* We must sleep until there is at least one process in the list
414 * of running processes. */
415 if (cores
[CURRENT_CORE
].running
!= NULL
)
418 /* Enter sleep mode to reduce power usage, woken up on interrupt */
420 asm volatile ("stop #0x2000");
421 #elif CONFIG_CPU == SH7034
423 asm volatile ("sleep");
424 #elif defined (CPU_PP)
427 /* This should sleep the CPU. It appears to wake by itself on
429 if (CURRENT_CORE
== CPU
)
430 CPU_CTL
= PROC_SLEEP
;
432 COP_CTL
= PROC_SLEEP
;
435 #elif CONFIG_CPU == S3C2440
436 CLKCON
|= (1 << 2); /* set IDLE bit */
437 for(i
=0; i
<10; i
++); /* wait for IDLE */
438 CLKCON
&= ~(1 << 2); /* reset IDLE bit when wake up */
/* NOTE(review): the profiling guard (#ifdef RB_PROFILE) around these two
 * functions is reconstructed — the directive lines are missing in this
 * extraction. */
#ifdef RB_PROFILE
/* Map a thread_entry pointer back to its index in the threads[] table,
 * or -1 if it is not a slot from that table. */
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&threads[i] == thread)
            return i;
    }

    return -1;
}

/* Begin profiling the currently running thread. */
void profile_thread(void) {
    profstart(get_threadnum(cores[CURRENT_CORE].running));
}
#endif
463 /* Compiler trick: Don't declare as static to prevent putting
464 * function in IRAM. */
465 void change_thread_state(struct thread_entry
**blocked_list
)
467 struct thread_entry
*old
;
468 unsigned long new_state
;
470 /* Remove the thread from the list of running threads. */
471 old
= cores
[CURRENT_CORE
].running
;
472 new_state
= GET_STATE(old
->statearg
);
474 /* Check if a thread state change has been requested. */
477 /* Change running thread state and switch to next thread. */
478 remove_from_list(&cores
[CURRENT_CORE
].running
, old
);
480 /* And put the thread into a new list of inactive threads. */
481 if (new_state
== STATE_BLOCKED
)
482 add_to_list(blocked_list
, old
);
484 add_to_list(&cores
[CURRENT_CORE
].sleeping
, old
);
486 #ifdef HAVE_PRIORITY_SCHEDULING
487 /* Reset priorities */
488 if (old
->priority
== cores
[CURRENT_CORE
].highest_priority
)
489 cores
[CURRENT_CORE
].highest_priority
= 100;
493 /* Switch to the next running thread. */
494 cores
[CURRENT_CORE
].running
= old
->next
;
497 /*---------------------------------------------------------------------------
498 * Switch thread in round robin fashion.
499 *---------------------------------------------------------------------------
501 void switch_thread(bool save_context
, struct thread_entry
**blocked_list
)
504 profile_thread_stopped(get_threadnum(cores
[CURRENT_CORE
].running
));
506 unsigned int *stackptr
;
514 /* Begin task switching by saving our current context so that we can
515 * restore the state of the current thread later to the point prior
519 store_context(&cores
[CURRENT_CORE
].running
->context
);
521 /* Check if the current thread stack is overflown */
522 stackptr
= cores
[CURRENT_CORE
].running
->stack
;
523 if(stackptr
[0] != DEADBEEF
)
524 panicf("Stkov %s", cores
[CURRENT_CORE
].running
->name
);
526 /* Rearrange thread lists as needed */
527 change_thread_state(blocked_list
);
529 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
530 /* This has to be done after the scheduler is finished with the
531 blocked_list pointer so that an IRQ can't kill us by attempting
532 a wake but before attempting any core sleep. */
533 if (cores
[CURRENT_CORE
].switch_to_irq_level
!= STAY_IRQ_LEVEL
)
535 int level
= cores
[CURRENT_CORE
].switch_to_irq_level
;
536 cores
[CURRENT_CORE
].switch_to_irq_level
= STAY_IRQ_LEVEL
;
537 set_irq_level(level
);
542 /* Go through the list of sleeping task to check if we need to wake up
543 * any of them due to timeout. Also puts core into sleep state until
544 * there is at least one running process again. */
547 #ifdef HAVE_PRIORITY_SCHEDULING
548 /* Select the new task based on priorities and the last time a process
552 int priority
= cores
[CURRENT_CORE
].running
->priority
;
554 if (priority
< cores
[CURRENT_CORE
].highest_priority
)
555 cores
[CURRENT_CORE
].highest_priority
= priority
;
557 if (priority
== cores
[CURRENT_CORE
].highest_priority
||
558 (current_tick
- cores
[CURRENT_CORE
].running
->last_run
>
560 cores
[CURRENT_CORE
].running
->priority_x
!= 0)
565 cores
[CURRENT_CORE
].running
= cores
[CURRENT_CORE
].running
->next
;
568 /* Reset the value of thread's last running time to the current time. */
569 cores
[CURRENT_CORE
].running
->last_run
= current_tick
;
575 /* And finally give control to the next thread. */
576 load_context(&cores
[CURRENT_CORE
].running
->context
);
579 profile_thread_started(get_threadnum(cores
[CURRENT_CORE
].running
));
583 void sleep_thread(int ticks
)
585 struct thread_entry
*current
;
589 current
= cores
[CURRENT_CORE
].running
;
591 #ifdef HAVE_SCHEDULER_BOOSTCTRL
592 if (STATE_IS_BOOSTED(current
->statearg
))
595 if (!boosted_threads
)
602 /* Set the thread's new state and timeout and finally force a task switch
603 * so that scheduler removes thread from the list of running processes
604 * and puts it in list of sleeping tasks. */
605 SET_STATE(current
->statearg
, STATE_SLEEPING
, current_tick
+ ticks
+ 1);
607 switch_thread(true, NULL
);
610 void block_thread(struct thread_entry
**list
)
612 struct thread_entry
*current
;
616 /* Get the entry for the current running thread. */
617 current
= cores
[CURRENT_CORE
].running
;
619 #ifdef HAVE_SCHEDULER_BOOSTCTRL
620 /* Keep the boosted state over indefinite block calls, because
621 * we are waiting until the earliest time that someone else
622 * completes an action */
623 unsigned long boost_flag
= STATE_IS_BOOSTED(current
->statearg
);
626 #ifdef THREAD_EXTRA_CHECKS
627 /* We are not allowed to mix blocking types in one queue. */
628 if (*list
&& GET_STATE((*list
)->statearg
) == STATE_BLOCKED_W_TMO
)
629 panicf("Blocking violation B->*T");
632 /* Set the state to blocked and ask the scheduler to switch tasks,
633 * this takes us off of the run queue until we are explicitly woken */
634 SET_STATE(current
->statearg
, STATE_BLOCKED
, 0);
636 switch_thread(true, list
);
638 #ifdef HAVE_SCHEDULER_BOOSTCTRL
639 /* Reset only the boosted flag to indicate we are up and running again. */
640 current
->statearg
= boost_flag
;
642 /* Clear all flags to indicate we are up and running again. */
643 current
->statearg
= 0;
647 void block_thread_w_tmo(struct thread_entry
**list
, int timeout
)
649 struct thread_entry
*current
;
650 /* Get the entry for the current running thread. */
651 current
= cores
[CURRENT_CORE
].running
;
654 #ifdef HAVE_SCHEDULER_BOOSTCTRL
655 /* A block with a timeout is a sleep situation, whatever we are waiting
656 * for _may or may not_ happen, regardless of boost state, (user input
657 * for instance), so this thread no longer needs to boost */
658 if (STATE_IS_BOOSTED(current
->statearg
))
661 if (!boosted_threads
)
668 #ifdef THREAD_EXTRA_CHECKS
669 /* We can store only one thread to the "list" if thread is used
670 * in other list (such as core's list for sleeping tasks). */
672 panicf("Blocking violation T->*B");
675 /* Set the state to blocked with the specified timeout */
676 SET_STATE(current
->statearg
, STATE_BLOCKED_W_TMO
, current_tick
+ timeout
);
678 /* Set the "list" for explicit wakeup */
681 /* Now force a task switch and block until we have been woken up
682 * by another thread or timeout is reached. */
683 switch_thread(true, NULL
);
685 /* It is now safe for another thread to block on this "list" */
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) && !defined(SIMULATOR)
/* Block on 'list', restoring IRQs to 'level' once the scheduler is done
 * with the blocked_list pointer (see switch_thread). */
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread(list);  /* NOTE(review): call reconstructed — line missing */
}

/* Same as above but with a wake timeout. */
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level)
{
    cores[CURRENT_CORE].switch_to_irq_level = level;
    block_thread_w_tmo(list, timeout);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
706 void wakeup_thread(struct thread_entry
**list
)
708 struct thread_entry
*thread
;
710 /* Check if there is a blocked thread at all. */
716 /* Wake up the last thread first. */
719 /* Determine thread's current state. */
720 switch (GET_STATE(thread
->statearg
))
723 /* Remove thread from the list of blocked threads and add it
724 * to the scheduler's list of running processes. List removal
725 * is safe since each object maintains it's own list of
726 * sleepers and queues protect against reentrancy. */
727 remove_from_list(list
, thread
);
728 add_to_list(cores
[IF_COP2(thread
->core
)].wakeup_list
, thread
);
730 case STATE_BLOCKED_W_TMO
:
731 /* Just remove the timeout to cause scheduler to immediately
732 * wake up the thread. */
733 thread
->statearg
= 0;
737 /* Nothing to do. Thread has already been woken up
738 * or it's state is not blocked or blocked with timeout. */
743 inline static int find_empty_thread_slot(void)
747 for (n
= 0; n
< MAXTHREADS
; n
++)
749 if (threads
[n
].name
== NULL
)
756 /* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
758 void wakeup_thread_irq_safe(struct thread_entry
**list
)
760 struct core_entry
*core
= &cores
[CURRENT_CORE
];
761 /* Switch wakeup lists and call wakeup_thread */
762 core
->wakeup_list
= &core
->waking
;
764 /* Switch back to normal running list */
765 core
->wakeup_list
= &core
->running
;
768 /*---------------------------------------------------------------------------
770 * If using a dual core architecture, specify which core to start the thread
771 * on, and whether to fall back to the other core if it can't be created
772 * Return ID if context area could be allocated, else NULL.
773 *---------------------------------------------------------------------------
776 create_thread(void (*function
)(void), void* stack
, int stack_size
,
777 const char *name
IF_PRIO(, int priority
)
778 IF_COP(, unsigned int core
, bool fallback
))
781 unsigned int stacklen
;
782 unsigned int *stackptr
;
785 struct thread_entry
*thread
;
789 * To prevent ifdef hell while keeping the binary size down, we define
790 * core here if it hasn't been passed as a parameter
797 /* If the kernel hasn't initialised on the COP (most likely due to an old
798 * bootloader) then refuse to start threads on the COP
800 if ((core
== COP
) && !cores
[core
].kernel_running
)
803 return create_thread(function
, stack
, stack_size
, name
804 IF_PRIO(, priority
) IF_COP(, CPU
, false));
812 slot
= find_empty_thread_slot();
819 /* Munge the stack to make it easy to spot stack overflows */
820 stacklen
= stack_size
/ sizeof(int);
822 for(i
= 0;i
< stacklen
;i
++)
824 stackptr
[i
] = DEADBEEF
;
827 /* Store interesting information */
828 thread
= &threads
[slot
];
830 thread
->stack
= stack
;
831 thread
->stack_size
= stack_size
;
832 thread
->statearg
= 0;
833 #ifdef HAVE_PRIORITY_SCHEDULING
834 thread
->priority_x
= 0;
835 thread
->priority
= priority
;
836 cores
[core
].highest_priority
= 100;
843 regs
= &thread
->context
;
844 /* Align stack to an even 32 bit boundary */
845 regs
->sp
= (void*)(((unsigned int)stack
+ stack_size
) & ~3);
846 regs
->start
= (void*)function
;
848 /* Do any CPU specific inits after initializing common items
849 to have access to valid data */
850 THREAD_CPU_INIT(core
, thread
);
852 add_to_list(&cores
[core
].running
, thread
);
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/* Mark the current thread as boosted; the first boosted thread turns the
 * CPU boost on. NOTE(review): locking and counter-increment lines are
 * reconstructed — missing in this extraction. */
void trigger_cpu_boost(void)
{
    lock_cores();

    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
    {
        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
        if (!boosted_threads)
        {
            cpu_boost(true);
        }
        boosted_threads++;
    }

    unlock_cores();
}
#endif
880 /*---------------------------------------------------------------------------
881 * Remove a thread on the current core from the scheduler.
882 * Parameter is the ID as returned from create_thread().
883 *---------------------------------------------------------------------------
885 void remove_thread(struct thread_entry
*thread
)
890 thread
= cores
[CURRENT_CORE
].running
;
892 /* Free the entry by removing thread name. */
894 #ifdef HAVE_PRIORITY_SCHEDULING
895 cores
[IF_COP2(thread
->core
)].highest_priority
= 100;
898 if (thread
== cores
[IF_COP2(thread
->core
)].running
)
900 remove_from_list(&cores
[IF_COP2(thread
->core
)].running
, thread
);
901 switch_thread(false, NULL
);
905 if (thread
== cores
[IF_COP2(thread
->core
)].sleeping
)
906 remove_from_list(&cores
[IF_COP2(thread
->core
)].sleeping
, thread
);
908 remove_from_list(NULL
, thread
);
913 #ifdef HAVE_PRIORITY_SCHEDULING
914 int thread_set_priority(struct thread_entry
*thread
, int priority
)
920 thread
= cores
[CURRENT_CORE
].running
;
922 old_priority
= thread
->priority
;
923 thread
->priority
= priority
;
924 cores
[IF_COP2(thread
->core
)].highest_priority
= 100;
930 int thread_get_priority(struct thread_entry
*thread
)
933 thread
= cores
[CURRENT_CORE
].running
;
935 return thread
->priority
;
938 void priority_yield(void)
940 struct thread_entry
*thread
= cores
[CURRENT_CORE
].running
;
941 thread
->priority_x
= 1;
942 switch_thread(true, NULL
);
943 thread
->priority_x
= 0;
945 #endif /* HAVE_PRIORITY_SCHEDULING */
947 struct thread_entry
* thread_get_current(void)
949 return cores
[CURRENT_CORE
].running
;
952 void init_threads(void)
954 unsigned int core
= CURRENT_CORE
;
957 /* Let main CPU initialize first. */
961 while (!cores
[CPU
].kernel_running
) ;
966 slot
= find_empty_thread_slot();
968 cores
[core
].sleeping
= NULL
;
969 cores
[core
].running
= NULL
;
970 cores
[core
].waking
= NULL
;
971 cores
[core
].wakeup_list
= &cores
[core
].running
;
972 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
973 cores
[core
].switch_to_irq_level
= STAY_IRQ_LEVEL
;
975 threads
[slot
].name
= main_thread_name
;
976 threads
[slot
].statearg
= 0;
977 threads
[slot
].context
.start
= 0; /* core's main thread already running */
979 threads
[slot
].core
= core
;
981 #ifdef HAVE_PRIORITY_SCHEDULING
982 threads
[slot
].priority
= PRIORITY_USER_INTERFACE
;
983 threads
[slot
].priority_x
= 0;
984 cores
[core
].highest_priority
= 100;
986 #ifdef HAVE_SCHEDULER_BOOSTCTRL
989 add_to_list(&cores
[core
].running
, &threads
[slot
]);
991 /* In multiple core setups, each core has a different stack. There is
992 * probably a much better way to do this. */
995 threads
[slot
].stack
= stackbegin
;
996 threads
[slot
].stack_size
= (int)stackend
- (int)stackbegin
;
998 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
1001 threads
[slot
].stack
= cop_stackbegin
;
1002 threads
[slot
].stack_size
=
1003 (int)cop_stackend
- (int)cop_stackbegin
;
1006 cores
[core
].kernel_running
= true;
1012 int thread_stack_usage(const struct thread_entry
*thread
)
1015 unsigned int *stackptr
= thread
->stack
;
1017 for (i
= 0;i
< thread
->stack_size
/sizeof(int);i
++)
1019 if (stackptr
[i
] != DEADBEEF
)
1023 return ((thread
->stack_size
- i
* sizeof(int)) * 100) /
1027 int thread_get_status(const struct thread_entry
*thread
)
1029 return GET_STATE(thread
->statearg
);