1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Ulf Ralberg
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
21 #include "config.h"
22 #include <stdbool.h>
23 #include <stdio.h>
24 #include "thread.h"
25 #include "panic.h"
26 #include "system.h"
27 #include "kernel.h"
28 #include "cpu.h"
29 #include "string.h"
30 #include "buffer.h"
31 #ifdef RB_PROFILE
32 #include <profile.h>
33 #endif
34 /****************************************************************************
35 * ATTENTION!! *
36 * See notes below on implementing processor-specific portions! *
37 ***************************************************************************/
39 /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
40 #ifdef DEBUG
41 #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
42 #else
43 #define THREAD_EXTRA_CHECKS 0
44 #endif
46 /**
47  * General locking order to guarantee progress. The order must be observed but
48  * not all stages are necessarily obligatory. Going from 1) to 3) is
49  * perfectly legal.
51 * 1) IRQ
52  * This is first because of the likelihood of having an interrupt occur that
53 * also accesses one of the objects farther down the list. Any non-blocking
54 * synchronization done may already have a lock on something during normal
55 * execution and if an interrupt handler running on the same processor as
56 * the one that has the resource locked were to attempt to access the
57  * resource, the interrupt handler would spin forever waiting for an unlock
58 * that will never happen. There is no danger if the interrupt occurs on
59 * a different processor because the one that has the lock will eventually
60 * unlock and the other processor's handler may proceed at that time. Not
61  * necessary when the resource in question is definitely not available to
62 * interrupt handlers.
64 * 2) Kernel Object
65  * Stage 1) may be needed beforehand if the kernel object allows dual-use such as
66 * event queues. The kernel object must have a scheme to protect itself from
67 * access by another processor and is responsible for serializing the calls
68 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
69 * other. Objects' queues are also protected here.
71 * 3) Thread Slot
72 * This locks access to the thread's slot such that its state cannot be
73 * altered by another processor when a state change is in progress such as
74 * when it is in the process of going on a blocked list. An attempt to wake
75 * a thread while it is still blocking will likely desync its state with
76 * the other resources used for that state.
78 * 4) Core Lists
79 * These lists are specific to a particular processor core and are accessible
80 * by all processor cores and interrupt handlers. The running (rtr) list is
81 * the prime example where a thread may be added by any means.
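 *
 * Illustrative sketch only (not part of the build; "q" and "current" are
 * placeholders): a blocking path honouring the order above might look like
 *     disable_irq();              -- 1) IRQ
 *     corelock_lock(&q->cl);      -- 2) kernel object
 *     LOCK_THREAD(current);       -- 3) thread slot
 *     RTR_LOCK(core);             -- 4) core lists
 * with the unlocks performed in the reverse order.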
84 /*---------------------------------------------------------------------------
85 * Processor specific: core_sleep/core_wake/misc. notes
87 * ARM notes:
88 * FIQ is not dealt with by the scheduler code and is simply restored if it
89  * must be masked for some reason - because threading modifies a register
90 * that FIQ may also modify and there's no way to accomplish it atomically.
91 * s3c2440 is such a case.
93  * Audio interrupts are generally treated at a higher priority than others;
94  * usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
95  * is not in general safe. Special cases may be constructed on a per-
96 * source basis and blocking operations are not available.
98  * core_sleep procedure to implement for any CPU to ensure an asynchronous
99  * wakeup never results in requiring a wait until the next tick (up to
100 * 10000uS!). May require assembly and careful instruction ordering.
102 * 1) On multicore, stay awake if directed to do so by another. If so, goto
103 * step 4.
104 * 2) If processor requires, atomically reenable interrupts and perform step
105 * 3.
106 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
107 * on Coldfire) goto step 5.
108 * 4) Enable interrupts.
109 * 5) Exit procedure.
111  * core_wake and multiprocessor notes for sleep/wake coordination:
112 * If possible, to wake up another processor, the forcing of an interrupt on
113 * the woken core by the waker core is the easiest way to ensure a non-
114 * delayed wake and immediate execution of any woken threads. If that isn't
115  * available then some careful non-blocking synchronization is needed (as on
116 * PP targets at the moment).
117 *---------------------------------------------------------------------------
120 /* Cast to the machine pointer size, whose size could be < 4 or > 32
121 * (someday :). */
122 #define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
123 static struct core_entry cores[NUM_CORES] IBSS_ATTR;
124 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
126 static const char main_thread_name[] = "main";
127 #if (CONFIG_PLATFORM & PLATFORM_NATIVE)
128 extern uintptr_t stackbegin[];
129 extern uintptr_t stackend[];
130 #else
131 extern uintptr_t *stackbegin;
132 extern uintptr_t *stackend;
133 #endif
135 static inline void core_sleep(IF_COP_VOID(unsigned int core))
136 __attribute__((always_inline));
138 void check_tmo_threads(void)
139 __attribute__((noinline));
141 static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
142 __attribute__((always_inline));
144 static void add_to_list_tmo(struct thread_entry *thread)
145 __attribute__((noinline));
147 static void core_schedule_wakeup(struct thread_entry *thread)
148 __attribute__((noinline));
150 #if NUM_CORES > 1
151 static inline void run_blocking_ops(
152 unsigned int core, struct thread_entry *thread)
153 __attribute__((always_inline));
154 #endif
156 static void thread_stkov(struct thread_entry *thread)
157 __attribute__((noinline));
159 static inline void store_context(void* addr)
160 __attribute__((always_inline));
162 static inline void load_context(const void* addr)
163 __attribute__((always_inline));
165 #if NUM_CORES > 1
166 static void thread_final_exit_do(struct thread_entry *current)
167 __attribute__((noinline, noreturn, used));
168 #else
169 static inline void thread_final_exit(struct thread_entry *current)
170 __attribute__((always_inline, noreturn));
171 #endif
173 void switch_thread(void)
174 __attribute__((noinline));
176 /****************************************************************************
177 * Processor/OS-specific section - include necessary core support
180 #if defined(HAVE_WIN32_FIBER_THREADS)
181 #include "thread-win32.c"
182 #elif defined(HAVE_SIGALTSTACK_THREADS)
183 #include "thread-unix.c"
184 #elif defined(CPU_ARM)
185 #include "thread-arm.c"
186 #if defined (CPU_PP)
187 #include "thread-pp.c"
188 #endif /* CPU_PP */
189 #elif defined(CPU_COLDFIRE)
190 #include "thread-coldfire.c"
191 #elif CONFIG_CPU == SH7034
192 #include "thread-sh.c"
193 #elif defined(CPU_MIPS) && CPU_MIPS == 32
194 #include "thread-mips32.c"
195 #else
196 /* Wouldn't compile anyway */
197 #error Processor not implemented.
198 #endif /* CONFIG_CPU == */
200 #ifndef IF_NO_SKIP_YIELD
201 #define IF_NO_SKIP_YIELD(...)
202 #endif
205 * End Processor-specific section
206 ***************************************************************************/
208 #if THREAD_EXTRA_CHECKS
209 static void thread_panicf(const char *msg, struct thread_entry *thread)
211 IF_COP( const unsigned int core = thread->core; )
212 static char name[32];
213 thread_get_name(name, 32, thread);
214 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
216 static void thread_stkov(struct thread_entry *thread)
218 thread_panicf("Stkov", thread);
220 #define THREAD_PANICF(msg, thread) \
221 thread_panicf(msg, thread)
222 #define THREAD_ASSERT(exp, msg, thread) \
223 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
224 #else
225 static void thread_stkov(struct thread_entry *thread)
227 IF_COP( const unsigned int core = thread->core; )
228 static char name[32];
229 thread_get_name(name, 32, thread);
230 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
232 #define THREAD_PANICF(msg, thread)
233 #define THREAD_ASSERT(exp, msg, thread)
234 #endif /* THREAD_EXTRA_CHECKS */
236 /* Thread locking */
237 #if NUM_CORES > 1
238 #define LOCK_THREAD(thread) \
239 ({ corelock_lock(&(thread)->slot_cl); })
240 #define TRY_LOCK_THREAD(thread) \
241 ({ corelock_try_lock(&(thread)->slot_cl); })
242 #define UNLOCK_THREAD(thread) \
243 ({ corelock_unlock(&(thread)->slot_cl); })
244 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
245 ({ unsigned int _core = (thread)->core; \
246 cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
247 cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
248 #else
249 #define LOCK_THREAD(thread) \
250 ({ })
251 #define TRY_LOCK_THREAD(thread) \
252 ({ })
253 #define UNLOCK_THREAD(thread) \
254 ({ })
255 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
256 ({ })
257 #endif
259 /* RTR list */
260 #define RTR_LOCK(core) \
261 ({ corelock_lock(&cores[core].rtr_cl); })
262 #define RTR_UNLOCK(core) \
263 ({ corelock_unlock(&cores[core].rtr_cl); })
265 #ifdef HAVE_PRIORITY_SCHEDULING
266 #define rtr_add_entry(core, priority) \
267 prio_add_entry(&cores[core].rtr, (priority))
269 #define rtr_subtract_entry(core, priority) \
270 prio_subtract_entry(&cores[core].rtr, (priority))
272 #define rtr_move_entry(core, from, to) \
273 prio_move_entry(&cores[core].rtr, (from), (to))
274 #else
275 #define rtr_add_entry(core, priority)
276 #define rtr_add_entry_inl(core, priority)
277 #define rtr_subtract_entry(core, priority)
278 #define rtr_subtract_entry_inl(core, priority)
279 #define rtr_move_entry(core, from, to)
280 #define rtr_move_entry_inl(core, from, to)
281 #endif
283 /*---------------------------------------------------------------------------
284 * Thread list structure - circular:
285 * +------------------------------+
286 * | |
287 * +--+---+<-+---+<-+---+<-+---+<-+
288 * Head->| T | | T | | T | | T |
289 * +->+---+->+---+->+---+->+---+--+
290 * | |
291 * +------------------------------+
292 *---------------------------------------------------------------------------
295 /*---------------------------------------------------------------------------
296 * Adds a thread to a list of threads using "insert last". Uses the "l"
297 * links.
298 *---------------------------------------------------------------------------
300 static void add_to_list_l(struct thread_entry **list,
301 struct thread_entry *thread)
303 struct thread_entry *l = *list;
305 if (l == NULL)
307 /* Insert into unoccupied list */
308 thread->l.prev = thread;
309 thread->l.next = thread;
310 *list = thread;
311 return;
314 /* Insert last */
315 thread->l.prev = l->l.prev;
316 thread->l.next = l;
317 l->l.prev->l.next = thread;
318 l->l.prev = thread;
321 /*---------------------------------------------------------------------------
322 * Removes a thread from a list of threads. Uses the "l" links.
323 *---------------------------------------------------------------------------
325 static void remove_from_list_l(struct thread_entry **list,
326 struct thread_entry *thread)
328 struct thread_entry *prev, *next;
330 next = thread->l.next;
332 if (thread == next)
334 /* The only item */
335 *list = NULL;
336 return;
339 if (thread == *list)
341 /* List becomes next item */
342 *list = next;
345 prev = thread->l.prev;
347 /* Fix links to jump over the removed entry. */
348 next->l.prev = prev;
349 prev->l.next = next;
352 /*---------------------------------------------------------------------------
353 * Timeout list structure - circular reverse (to make "remove item" O(1)),
354 * NULL-terminated forward (to ease the far more common forward traversal):
355 * +------------------------------+
356 * | |
357 * +--+---+<-+---+<-+---+<-+---+<-+
358 * Head->| T | | T | | T | | T |
359 * +---+->+---+->+---+->+---+-X
360 *---------------------------------------------------------------------------
363 /*---------------------------------------------------------------------------
364  * Add a thread to the core's timeout list by linking the pointers in its
365 * tmo structure.
366 *---------------------------------------------------------------------------
368 static void add_to_list_tmo(struct thread_entry *thread)
370 struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
371 THREAD_ASSERT(thread->tmo.prev == NULL,
372 "add_to_list_tmo->already listed", thread);
374 thread->tmo.next = NULL;
376 if (tmo == NULL)
378 /* Insert into unoccupied list */
379 thread->tmo.prev = thread;
380 cores[IF_COP_CORE(thread->core)].timeout = thread;
381 return;
384 /* Insert Last */
385 thread->tmo.prev = tmo->tmo.prev;
386 tmo->tmo.prev->tmo.next = thread;
387 tmo->tmo.prev = thread;
390 /*---------------------------------------------------------------------------
391  * Remove a thread from the core's timeout list by unlinking the pointers in
392 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
393 * is cancelled.
394 *---------------------------------------------------------------------------
396 static void remove_from_list_tmo(struct thread_entry *thread)
398 struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
399 struct thread_entry *prev = thread->tmo.prev;
400 struct thread_entry *next = thread->tmo.next;
402 THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
404 if (next != NULL)
405 next->tmo.prev = prev;
407 if (thread == *list)
409 /* List becomes next item and empty if next == NULL */
410 *list = next;
411 /* Mark as unlisted */
412 thread->tmo.prev = NULL;
414 else
416 if (next == NULL)
417 (*list)->tmo.prev = prev;
418 prev->tmo.next = next;
419 /* Mark as unlisted */
420 thread->tmo.prev = NULL;
425 #ifdef HAVE_PRIORITY_SCHEDULING
426 /*---------------------------------------------------------------------------
427 * Priority distribution structure (one category for each possible priority):
429 * +----+----+----+ ... +-----+
430 * hist: | F0 | F1 | F2 | | F31 |
431 * +----+----+----+ ... +-----+
432 * mask: | b0 | b1 | b2 | | b31 |
433 * +----+----+----+ ... +-----+
435 * F = count of threads at priority category n (frequency)
436 * b = bitmask of non-zero priority categories (occupancy)
438  *       / if hist[n] != 0 : 1
439 * b[n] = |
440 * \ else : 0
442 *---------------------------------------------------------------------------
443  * Basic priority inheritance protocol (PIP):
445 * Mn = mutex n, Tn = thread n
447 * A lower priority thread inherits the priority of the highest priority
448 * thread blocked waiting for it to complete an action (such as release a
449 * mutex or respond to a message via queue_send):
451 * 1) T2->M1->T1
453  * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
454 * priority than T1 then T1 inherits the priority of T2.
456 * 2) T3
457 * \/
458 * T2->M1->T1
460 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
461 * T1 inherits the higher of T2 and T3.
463 * 3) T3->M2->T2->M1->T1
465 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
466 * then T1 inherits the priority of T3 through T2.
468 * Blocking chains can grow arbitrarily complex (though it's best that they
469  * not form at all very often :) and build up from these units.
470 *---------------------------------------------------------------------------
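/* Worked example (illustrative): with two runnable threads at priority 3 and
 * one at priority 7, hist[3] == 2, hist[7] == 1 and mask == (1<<3)|(1<<7)
 * == 0x88. find_first_set_bit(mask) then yields 3, the numerically lowest
 * and therefore highest effective priority present. */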
473 /*---------------------------------------------------------------------------
474 * Increment frequency at category "priority"
475 *---------------------------------------------------------------------------
477 static inline unsigned int prio_add_entry(
478 struct priority_distribution *pd, int priority)
480 unsigned int count;
481 /* Enough size/instruction count difference for ARM makes it worth it to
482 * use different code (192 bytes for ARM). Only thing better is ASM. */
483 #ifdef CPU_ARM
484 count = pd->hist[priority];
485 if (++count == 1)
486 pd->mask |= 1 << priority;
487 pd->hist[priority] = count;
488 #else /* This one's better for Coldfire */
489 if ((count = ++pd->hist[priority]) == 1)
490 pd->mask |= 1 << priority;
491 #endif
493 return count;
496 /*---------------------------------------------------------------------------
497 * Decrement frequency at category "priority"
498 *---------------------------------------------------------------------------
500 static inline unsigned int prio_subtract_entry(
501 struct priority_distribution *pd, int priority)
503 unsigned int count;
505 #ifdef CPU_ARM
506 count = pd->hist[priority];
507 if (--count == 0)
508 pd->mask &= ~(1 << priority);
509 pd->hist[priority] = count;
510 #else
511 if ((count = --pd->hist[priority]) == 0)
512 pd->mask &= ~(1 << priority);
513 #endif
515 return count;
518 /*---------------------------------------------------------------------------
519 * Remove from one category and add to another
520 *---------------------------------------------------------------------------
522 static inline void prio_move_entry(
523 struct priority_distribution *pd, int from, int to)
525 uint32_t mask = pd->mask;
527 #ifdef CPU_ARM
528 unsigned int count;
530 count = pd->hist[from];
531 if (--count == 0)
532 mask &= ~(1 << from);
533 pd->hist[from] = count;
535 count = pd->hist[to];
536 if (++count == 1)
537 mask |= 1 << to;
538 pd->hist[to] = count;
539 #else
540 if (--pd->hist[from] == 0)
541 mask &= ~(1 << from);
543 if (++pd->hist[to] == 1)
544 mask |= 1 << to;
545 #endif
547 pd->mask = mask;
550 /*---------------------------------------------------------------------------
551 * Change the priority and rtr entry for a running thread
552 *---------------------------------------------------------------------------
554 static inline void set_running_thread_priority(
555 struct thread_entry *thread, int priority)
557 const unsigned int core = IF_COP_CORE(thread->core);
558 RTR_LOCK(core);
559 rtr_move_entry(core, thread->priority, priority);
560 thread->priority = priority;
561 RTR_UNLOCK(core);
564 /*---------------------------------------------------------------------------
565 * Finds the highest priority thread in a list of threads. If the list is
566  * empty, PRIORITY_IDLE is returned.
568 * It is possible to use the struct priority_distribution within an object
569  * instead of scanning the remaining threads in the list; as a compromise,
570  * the per-object memory overhead is saved at the cost of a slight speed
571  * penalty under high contention.
572 *---------------------------------------------------------------------------
574 static int find_highest_priority_in_list_l(
575 struct thread_entry * const thread)
577 if (LIKELY(thread != NULL))
579         /* Go through the list until ending up at the initial thread */
580 int highest_priority = thread->priority;
581 struct thread_entry *curr = thread;
585 int priority = curr->priority;
587 if (priority < highest_priority)
588 highest_priority = priority;
590 curr = curr->l.next;
592 while (curr != thread);
594 return highest_priority;
597 return PRIORITY_IDLE;
600 /*---------------------------------------------------------------------------
601  * Register priority with the blocking system and bubble it down the chain,
602  * if any, until we reach the end or something is already equal or higher.
604 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
605 * targets but that same action also guarantees a circular block anyway and
606 * those are prevented, right? :-)
607 *---------------------------------------------------------------------------
609 static struct thread_entry *
610 blocker_inherit_priority(struct thread_entry *current)
612 const int priority = current->priority;
613 struct blocker *bl = current->blocker;
614 struct thread_entry * const tstart = current;
615 struct thread_entry *bl_t = bl->thread;
617 /* Blocker cannot change since the object protection is held */
618 LOCK_THREAD(bl_t);
620 for (;;)
622 struct thread_entry *next;
623 int bl_pr = bl->priority;
625 if (priority >= bl_pr)
626 break; /* Object priority already high enough */
628 bl->priority = priority;
630 /* Add this one */
631 prio_add_entry(&bl_t->pdist, priority);
633 if (bl_pr < PRIORITY_IDLE)
635 /* Not first waiter - subtract old one */
636 prio_subtract_entry(&bl_t->pdist, bl_pr);
639 if (priority >= bl_t->priority)
640 break; /* Thread priority high enough */
642 if (bl_t->state == STATE_RUNNING)
644 /* Blocking thread is a running thread therefore there are no
645 * further blockers. Change the "run queue" on which it
646 * resides. */
647 set_running_thread_priority(bl_t, priority);
648 break;
651 bl_t->priority = priority;
653 /* If blocking thread has a blocker, apply transitive inheritance */
654 bl = bl_t->blocker;
656 if (bl == NULL)
657 break; /* End of chain or object doesn't support inheritance */
659 next = bl->thread;
661 if (UNLIKELY(next == tstart))
662 break; /* Full-circle - deadlock! */
664 UNLOCK_THREAD(current);
666 #if NUM_CORES > 1
667 for (;;)
669 LOCK_THREAD(next);
671 /* Blocker could change - retest condition */
672 if (LIKELY(bl->thread == next))
673 break;
675 UNLOCK_THREAD(next);
676 next = bl->thread;
678 #endif
679 current = bl_t;
680 bl_t = next;
683 UNLOCK_THREAD(bl_t);
685 return current;
688 /*---------------------------------------------------------------------------
689  * Readjust priorities when waking a thread blocked waiting for another,
690 * in essence "releasing" the thread's effect on the object owner. Can be
691 * performed from any context.
692 *---------------------------------------------------------------------------
694 struct thread_entry *
695 wakeup_priority_protocol_release(struct thread_entry *thread)
697 const int priority = thread->priority;
698 struct blocker *bl = thread->blocker;
699 struct thread_entry * const tstart = thread;
700 struct thread_entry *bl_t = bl->thread;
702 /* Blocker cannot change since object will be locked */
703 LOCK_THREAD(bl_t);
705 thread->blocker = NULL; /* Thread not blocked */
707 for (;;)
709 struct thread_entry *next;
710 int bl_pr = bl->priority;
712 if (priority > bl_pr)
713 break; /* Object priority higher */
715 next = *thread->bqp;
717 if (next == NULL)
719 /* No more threads in queue */
720 prio_subtract_entry(&bl_t->pdist, bl_pr);
721 bl->priority = PRIORITY_IDLE;
723 else
725 /* Check list for highest remaining priority */
726 int queue_pr = find_highest_priority_in_list_l(next);
728 if (queue_pr == bl_pr)
729 break; /* Object priority not changing */
731 /* Change queue priority */
732 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
733 bl->priority = queue_pr;
736 if (bl_pr > bl_t->priority)
737 break; /* thread priority is higher */
739 bl_pr = find_first_set_bit(bl_t->pdist.mask);
741 if (bl_pr == bl_t->priority)
742 break; /* Thread priority not changing */
744 if (bl_t->state == STATE_RUNNING)
746 /* No further blockers */
747 set_running_thread_priority(bl_t, bl_pr);
748 break;
751 bl_t->priority = bl_pr;
753 /* If blocking thread has a blocker, apply transitive inheritance */
754 bl = bl_t->blocker;
756 if (bl == NULL)
757 break; /* End of chain or object doesn't support inheritance */
759 next = bl->thread;
761 if (UNLIKELY(next == tstart))
762 break; /* Full-circle - deadlock! */
764 UNLOCK_THREAD(thread);
766 #if NUM_CORES > 1
767 for (;;)
769 LOCK_THREAD(next);
771 /* Blocker could change - retest condition */
772 if (LIKELY(bl->thread == next))
773 break;
775 UNLOCK_THREAD(next);
776 next = bl->thread;
778 #endif
779 thread = bl_t;
780 bl_t = next;
783 UNLOCK_THREAD(bl_t);
785 #if NUM_CORES > 1
786 if (UNLIKELY(thread != tstart))
788 /* Relock original if it changed */
789 LOCK_THREAD(tstart);
791 #endif
793 return cores[CURRENT_CORE].running;
796 /*---------------------------------------------------------------------------
797  * Transfer ownership to a thread waiting for an object and transfer
798 * inherited priority boost from other waiters. This algorithm knows that
799 * blocking chains may only unblock from the very end.
801 * Only the owning thread itself may call this and so the assumption that
802 * it is the running thread is made.
803 *---------------------------------------------------------------------------
805 struct thread_entry *
806 wakeup_priority_protocol_transfer(struct thread_entry *thread)
808 /* Waking thread inherits priority boost from object owner */
809 struct blocker *bl = thread->blocker;
810 struct thread_entry *bl_t = bl->thread;
811 struct thread_entry *next;
812 int bl_pr;
814 THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
815 "UPPT->wrong thread", cores[CURRENT_CORE].running);
817 LOCK_THREAD(bl_t);
819 bl_pr = bl->priority;
821 /* Remove the object's boost from the owning thread */
822 if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
823 bl_pr <= bl_t->priority)
825 /* No more threads at this priority are waiting and the old level is
826 * at least the thread level */
827 int priority = find_first_set_bit(bl_t->pdist.mask);
829 if (priority != bl_t->priority)
831 /* Adjust this thread's priority */
832 set_running_thread_priority(bl_t, priority);
836 next = *thread->bqp;
838 if (LIKELY(next == NULL))
840 /* Expected shortcut - no more waiters */
841 bl_pr = PRIORITY_IDLE;
843 else
845 if (thread->priority <= bl_pr)
847 /* Need to scan threads remaining in queue */
848 bl_pr = find_highest_priority_in_list_l(next);
851 if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
852 bl_pr < thread->priority)
854 /* Thread priority must be raised */
855 thread->priority = bl_pr;
859 bl->thread = thread; /* This thread pwns */
860 bl->priority = bl_pr; /* Save highest blocked priority */
861 thread->blocker = NULL; /* Thread not blocked */
863 UNLOCK_THREAD(bl_t);
865 return bl_t;
868 /*---------------------------------------------------------------------------
869  * No threads may be blocked waiting on this thread except to wait for it to exit.
870 * The alternative is more elaborate cleanup and object registration code.
871 * Check this for risk of silent data corruption when objects with
872 * inheritable blocking are abandoned by the owner - not precise but may
873 * catch something.
874 *---------------------------------------------------------------------------
876 static void __attribute__((noinline)) check_for_obj_waiters(
877 const char *function, struct thread_entry *thread)
879     /* Only one bit in the mask should be set, with a frequency of 1, which
880 * represents the thread's own base priority */
881 uint32_t mask = thread->pdist.mask;
882 if ((mask & (mask - 1)) != 0 ||
883 thread->pdist.hist[find_first_set_bit(mask)] > 1)
885 unsigned char name[32];
886 thread_get_name(name, 32, thread);
887 panicf("%s->%s with obj. waiters", function, name);
890 #endif /* HAVE_PRIORITY_SCHEDULING */
892 /*---------------------------------------------------------------------------
893 * Move a thread back to a running state on its core.
894 *---------------------------------------------------------------------------
896 static void core_schedule_wakeup(struct thread_entry *thread)
898 const unsigned int core = IF_COP_CORE(thread->core);
900 RTR_LOCK(core);
902 thread->state = STATE_RUNNING;
904 add_to_list_l(&cores[core].running, thread);
905 rtr_add_entry(core, thread->priority);
907 RTR_UNLOCK(core);
909 #if NUM_CORES > 1
910 if (core != CURRENT_CORE)
911 core_wake(core);
912 #endif
915 /*---------------------------------------------------------------------------
916 * Check the core's timeout list when at least one thread is due to wake.
917 * Filtering for the condition is done before making the call. Resets the
918  * tick at which the next check will occur.
919 *---------------------------------------------------------------------------
921 void check_tmo_threads(void)
923 const unsigned int core = CURRENT_CORE;
924 const long tick = current_tick; /* snapshot the current tick */
925 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
926 struct thread_entry *next = cores[core].timeout;
928 /* If there are no processes waiting for a timeout, just keep the check
929 tick from falling into the past. */
931 /* Break the loop once we have walked through the list of all
932 * sleeping processes or have removed them all. */
933 while (next != NULL)
935 /* Check sleeping threads. Allow interrupts between checks. */
936 enable_irq();
938 struct thread_entry *curr = next;
940 next = curr->tmo.next;
942 /* Lock thread slot against explicit wakeup */
943 disable_irq();
944 LOCK_THREAD(curr);
946 unsigned state = curr->state;
948 if (state < TIMEOUT_STATE_FIRST)
950 /* Cleanup threads no longer on a timeout but still on the
951 * list. */
952 remove_from_list_tmo(curr);
954 else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
956 /* Timeout still pending - this will be the usual case */
957 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
959 /* Earliest timeout found so far - move the next check up
960 to its time */
961 next_tmo_check = curr->tmo_tick;
964 else
966 /* Sleep timeout has been reached so bring the thread back to
967 * life again. */
968 if (state == STATE_BLOCKED_W_TMO)
970 #ifdef HAVE_CORELOCK_OBJECT
971 /* Lock the waiting thread's kernel object */
972 struct corelock *ocl = curr->obj_cl;
974 if (UNLIKELY(corelock_try_lock(ocl) == 0))
976 /* Need to retry in the correct order though the need is
977 * unlikely */
978 UNLOCK_THREAD(curr);
979 corelock_lock(ocl);
980 LOCK_THREAD(curr);
982 if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
984                     /* Thread was woken or removed explicitly while slot
985 * was unlocked */
986 corelock_unlock(ocl);
987 remove_from_list_tmo(curr);
988 UNLOCK_THREAD(curr);
989 continue;
992 #endif /* HAVE_CORELOCK_OBJECT */
994 remove_from_list_l(curr->bqp, curr);
996 #ifdef HAVE_WAKEUP_EXT_CB
997 if (curr->wakeup_ext_cb != NULL)
998 curr->wakeup_ext_cb(curr);
999 #endif
1001 #ifdef HAVE_PRIORITY_SCHEDULING
1002 if (curr->blocker != NULL)
1003 wakeup_priority_protocol_release(curr);
1004 #endif
1005 corelock_unlock(ocl);
1007 /* else state == STATE_SLEEPING */
1009 remove_from_list_tmo(curr);
1011 RTR_LOCK(core);
1013 curr->state = STATE_RUNNING;
1015 add_to_list_l(&cores[core].running, curr);
1016 rtr_add_entry(core, curr->priority);
1018 RTR_UNLOCK(core);
1021 UNLOCK_THREAD(curr);
1024 cores[core].next_tmo_check = next_tmo_check;
1027 /*---------------------------------------------------------------------------
1028 * Performs operations that must be done before blocking a thread but after
1029 * the state is saved.
1030 *---------------------------------------------------------------------------
1032 #if NUM_CORES > 1
1033 static inline void run_blocking_ops(
1034 unsigned int core, struct thread_entry *thread)
1036 struct thread_blk_ops *ops = &cores[core].blk_ops;
1037 const unsigned flags = ops->flags;
1039 if (LIKELY(flags == TBOP_CLEAR))
1040 return;
1042 switch (flags)
1044 case TBOP_SWITCH_CORE:
1045 core_switch_blk_op(core, thread);
1046 /* Fall-through */
1047 case TBOP_UNLOCK_CORELOCK:
1048 corelock_unlock(ops->cl_p);
1049 break;
1052 ops->flags = TBOP_CLEAR;
1054 #endif /* NUM_CORES > 1 */
1056 #ifdef RB_PROFILE
1057 void profile_thread(void)
1059 profstart(cores[CURRENT_CORE].running - threads);
1061 #endif
1063 /*---------------------------------------------------------------------------
1064 * Prepares a thread to block on an object's list and/or for a specified
1065 * duration - expects object and slot to be appropriately locked if needed
1066 * and interrupts to be masked.
1067 *---------------------------------------------------------------------------
1069 static inline void block_thread_on_l(struct thread_entry *thread,
1070 unsigned state)
1072 /* If inlined, unreachable branches will be pruned with no size penalty
1073 because state is passed as a constant parameter. */
1074 const unsigned int core = IF_COP_CORE(thread->core);
1076 /* Remove the thread from the list of running threads. */
1077 RTR_LOCK(core);
1078 remove_from_list_l(&cores[core].running, thread);
1079 rtr_subtract_entry(core, thread->priority);
1080 RTR_UNLOCK(core);
1082 /* Add a timeout to the block if not infinite */
1083 switch (state)
1085 case STATE_BLOCKED:
1086 case STATE_BLOCKED_W_TMO:
1087 /* Put the thread into a new list of inactive threads. */
1088 add_to_list_l(thread->bqp, thread);
1090 if (state == STATE_BLOCKED)
1091 break;
1093 /* Fall-through */
1094 case STATE_SLEEPING:
1095 /* If this thread times out sooner than any other thread, update
1096 next_tmo_check to its timeout */
1097 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1099 cores[core].next_tmo_check = thread->tmo_tick;
1102 if (thread->tmo.prev == NULL)
1104 add_to_list_tmo(thread);
1106 /* else thread was never removed from list - just keep it there */
1107 break;
1110     /* Remember the next thread about to block. */
1111 cores[core].block_task = thread;
1113 /* Report new state. */
1114 thread->state = state;
1117 /*---------------------------------------------------------------------------
1118 * Switch thread in round robin fashion for any given priority. Any thread
1119  * that removed itself from the running list first must have registered
1120  * itself as the core's block_task beforehand (see block_thread_on_l).
1122 * INTERNAL: Intended for use by kernel and not for programs.
1123 *---------------------------------------------------------------------------
1125 void switch_thread(void)
1128 const unsigned int core = CURRENT_CORE;
1129 struct thread_entry *block = cores[core].block_task;
1130 struct thread_entry *thread = cores[core].running;
1132 /* Get context to save - next thread to run is unknown until all wakeups
1133 * are evaluated */
1134 if (block != NULL)
1136 cores[core].block_task = NULL;
1138 #if NUM_CORES > 1
1139 if (UNLIKELY(thread == block))
1141 /* This was the last thread running and another core woke us before
1142 * reaching here. Force next thread selection to give tmo threads or
1143 * other threads woken before this block a first chance. */
1144 block = NULL;
1146 else
1147 #endif
1149 /* Blocking task is the old one */
1150 thread = block;
1154 #ifdef RB_PROFILE
1155 #ifdef CPU_COLDFIRE
1156 _profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
1157 #else
1158 profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
1159 #endif
1160 #endif
1162 /* Begin task switching by saving our current context so that we can
1163 * restore the state of the current thread later to the point prior
1164 * to this call. */
1165 store_context(&thread->context);
1167 /* Check if the current thread stack is overflown */
1168 if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
1169 thread_stkov(thread);
1171 #ifdef BUFFER_ALLOC_DEBUG
1172 /* Check if the current thread just did bad things with buffer_alloc()ed
1173 * memory */
1175 static char name[32];
1176 thread_get_name(name, 32, thread);
1177 buffer_alloc_check(name);
1179 #endif
1181 #if NUM_CORES > 1
1182 /* Run any blocking operations requested before switching/sleeping */
1183 run_blocking_ops(core, thread);
1184 #endif
1186 #ifdef HAVE_PRIORITY_SCHEDULING
1187 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1188 /* Reset the value of thread's skip count */
1189 thread->skip_count = 0;
1190 #endif
1192 for (;;)
1194 /* If there are threads on a timeout and the earliest wakeup is due,
1195 * check the list and wake any threads that need to start running
1196 * again. */
1197 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
1199 check_tmo_threads();
1202 disable_irq();
1203 RTR_LOCK(core);
1205 thread = cores[core].running;
1207 if (UNLIKELY(thread == NULL))
1209 /* Enter sleep mode to reduce power usage - woken up on interrupt
1210 * or wakeup request from another core - expected to enable
1211 * interrupts. */
1212 RTR_UNLOCK(core);
1213 core_sleep(IF_COP(core));
1215 else
1217 #ifdef HAVE_PRIORITY_SCHEDULING
1218 /* Select the new task based on priorities and the last time a
1219 * process got CPU time relative to the highest priority runnable
1220 * task. */
1221 struct priority_distribution *pd = &cores[core].rtr;
1222 int max = find_first_set_bit(pd->mask);
1224 if (block == NULL)
1226 /* Not switching on a block, tentatively select next thread */
1227 thread = thread->l.next;
1230 for (;;)
1232 int priority = thread->priority;
1233 int diff;
1235 /* This ridiculously simple method of aging seems to work
1236              * suspiciously well. It does tend to reward CPU hogs (under-
1237              * yielding) but that's generally not desirable at all. On
1238              * the plus side, relative to other threads, it penalizes
1239              * excess yielding, which is good if some high priority thread
1240 * is performing no useful work such as polling for a device
1241 * to be ready. Of course, aging is only employed when higher
1242 * and lower priority threads are runnable. The highest
1243 * priority runnable thread(s) are never skipped unless a
1244 * lower-priority process has aged sufficiently. Priorities
1245 * of REALTIME class are run strictly according to priority
1246 * thus are not subject to switchout due to lower-priority
1247 * processes aging; they must give up the processor by going
1248 * off the run list. */
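             /* Worked example (illustrative, both priorities below the
              * REALTIME class): a priority 20 thread competing with a runnable
              * priority 16 thread (diff == 4) is passed over until its
              * skip_count exceeds diff*diff == 16, i.e. it is selected on its
              * 17th examination here, assuming skip_count started at zero. */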
1249 if (LIKELY(priority <= max) ||
1250 IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
1251 (priority > PRIORITY_REALTIME &&
1252 (diff = priority - max,
1253 ++thread->skip_count > diff*diff)))
1255 cores[core].running = thread;
1256 break;
1259 thread = thread->l.next;
1261 #else
1262 /* Without priority use a simple FCFS algorithm */
1263 if (block == NULL)
1265 /* Not switching on a block, select next thread */
1266 thread = thread->l.next;
1267 cores[core].running = thread;
1269 #endif /* HAVE_PRIORITY_SCHEDULING */
1271 RTR_UNLOCK(core);
1272 enable_irq();
1273 break;
1277 /* And finally give control to the next thread. */
1278 load_context(&thread->context);
1280 #ifdef RB_PROFILE
1281 profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
1282 #endif
1286 /*---------------------------------------------------------------------------
1287 * Sleeps a thread for at least a specified number of ticks with zero being
1288 * a wait until the next tick.
1290 * INTERNAL: Intended for use by kernel and not for programs.
1291 *---------------------------------------------------------------------------
1293 void sleep_thread(int ticks)
1295 struct thread_entry *current = cores[CURRENT_CORE].running;
1297 LOCK_THREAD(current);
1299 /* Set our timeout, remove from run list and join timeout list. */
1300 current->tmo_tick = current_tick + ticks + 1;
1301 block_thread_on_l(current, STATE_SLEEPING);
1303 UNLOCK_THREAD(current);
1306 /*---------------------------------------------------------------------------
1307 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1309 * INTERNAL: Intended for use by kernel objects and not for programs.
1310 *---------------------------------------------------------------------------
1312 void block_thread(struct thread_entry *current)
1314 /* Set the state to blocked and take us off of the run queue until we
1315 * are explicitly woken */
1316 LOCK_THREAD(current);
1318 /* Set the list for explicit wakeup */
1319 block_thread_on_l(current, STATE_BLOCKED);
1321 #ifdef HAVE_PRIORITY_SCHEDULING
1322 if (current->blocker != NULL)
1324 /* Object supports PIP */
1325 current = blocker_inherit_priority(current);
1327 #endif
1329 UNLOCK_THREAD(current);
1332 /*---------------------------------------------------------------------------
1333 * Block a thread on a blocking queue for a specified time interval or until
1334 * explicitly woken - whichever happens first.
1336 * INTERNAL: Intended for use by kernel objects and not for programs.
1337 *---------------------------------------------------------------------------
1339 void block_thread_w_tmo(struct thread_entry *current, int timeout)
1341 /* Get the entry for the current running thread. */
1342 LOCK_THREAD(current);
1344 /* Set the state to blocked with the specified timeout */
1345 current->tmo_tick = current_tick + timeout;
1347 /* Set the list for explicit wakeup */
1348 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1350 #ifdef HAVE_PRIORITY_SCHEDULING
1351 if (current->blocker != NULL)
1353 /* Object supports PIP */
1354 current = blocker_inherit_priority(current);
1356 #endif
1358 UNLOCK_THREAD(current);
1361 /*---------------------------------------------------------------------------
1362  * Explicitly wake up a thread on a blocking queue. Only affects threads of
1363 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
1365 * This code should be considered a critical section by the caller meaning
1366 * that the object's corelock should be held.
1368 * INTERNAL: Intended for use by kernel objects and not for programs.
1369 *---------------------------------------------------------------------------
1371 unsigned int wakeup_thread(struct thread_entry **list)
1373 struct thread_entry *thread = *list;
1374 unsigned int result = THREAD_NONE;
1376 /* Check if there is a blocked thread at all. */
1377 if (thread == NULL)
1378 return result;
1380 LOCK_THREAD(thread);
1382 /* Determine thread's current state. */
1383 switch (thread->state)
1385 case STATE_BLOCKED:
1386 case STATE_BLOCKED_W_TMO:
1387 remove_from_list_l(list, thread);
1389 result = THREAD_OK;
1391 #ifdef HAVE_PRIORITY_SCHEDULING
1392 struct thread_entry *current;
1393 struct blocker *bl = thread->blocker;
1395 if (bl == NULL)
1397 /* No inheritance - just boost the thread by aging */
1398 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1399 thread->skip_count = thread->priority;
1400 current = cores[CURRENT_CORE].running;
1402 else
1404 /* Call the specified unblocking PIP */
1405 current = bl->wakeup_protocol(thread);
1408 if (current != NULL &&
1409 find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
1410 < current->priority)
1412 /* There is a thread ready to run of higher or same priority on
1413 * the same core as the current one; recommend a task switch.
1414 * Knowing if this is an interrupt call would be helpful here. */
1415 result |= THREAD_SWITCH;
1417 #endif /* HAVE_PRIORITY_SCHEDULING */
1419 core_schedule_wakeup(thread);
1420 break;
1422 /* Nothing to do. State is not blocked. */
1423 #if THREAD_EXTRA_CHECKS
1424 default:
1425 THREAD_PANICF("wakeup_thread->block invalid", thread);
1426 case STATE_RUNNING:
1427 case STATE_KILLED:
1428 break;
1429 #endif
1432 UNLOCK_THREAD(thread);
1433 return result;
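/* A caller typically invokes this with the object's corelock held, then acts
 * on the returned flags, for example (illustrative; "q" is a placeholder for
 * the kernel object owning the queue):
 *     unsigned int rc = wakeup_thread(&q->queue);
 *     corelock_unlock(&q->cl);
 *     if (rc & THREAD_SWITCH)
 *         switch_thread();
 */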
1436 /*---------------------------------------------------------------------------
1437  * Wake up an entire queue of threads - returns bitwise-or of return bitmask
1438  * from each operation or THREAD_NONE if nothing was awakened. Object owning
1439 * the queue must be locked first.
1441 * INTERNAL: Intended for use by kernel objects and not for programs.
1442 *---------------------------------------------------------------------------
1444 unsigned int thread_queue_wake(struct thread_entry **list)
1446 unsigned result = THREAD_NONE;
1448 for (;;)
1450 unsigned int rc = wakeup_thread(list);
1452 if (rc == THREAD_NONE)
1453 break; /* No more threads */
1455 result |= rc;
1458 return result;
1461 /*---------------------------------------------------------------------------
1462 * Assign the thread slot a new ID. Version is 1-255.
1463 *---------------------------------------------------------------------------
1465 static void new_thread_id(unsigned int slot_num,
1466 struct thread_entry *thread)
1468 unsigned int version =
1469 (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
1470 & THREAD_ID_VERSION_MASK;
1472 /* If wrapped to 0, make it 1 */
1473 if (version == 0)
1474 version = 1u << THREAD_ID_VERSION_SHIFT;
1476 thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
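/* Example (illustrative, assuming the usual 8-bit slot / version split from
 * thread.h): a slot 5 entry whose version field has advanced to 2 carries
 * id 0x00000205; the slot index is recovered with (id & THREAD_ID_SLOT_MASK)
 * and a stale id held for a dead thread no longer matches because the
 * version bits have moved on. */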
1479 /*---------------------------------------------------------------------------
1480  * Find an empty thread slot or NULL if none is found. The slot returned
1481 * will be locked on multicore.
1482 *---------------------------------------------------------------------------
1484 static struct thread_entry * find_empty_thread_slot(void)
1486 /* Any slot could be on an interrupt-accessible list */
1487 IF_COP( int oldlevel = disable_irq_save(); )
1488 struct thread_entry *thread = NULL;
1489 int n;
1491 for (n = 0; n < MAXTHREADS; n++)
1493 /* Obtain current slot state - lock it on multicore */
1494 struct thread_entry *t = &threads[n];
1495 LOCK_THREAD(t);
1497 if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
1499 /* Slot is empty - leave it locked and caller will unlock */
1500 thread = t;
1501 break;
1504 /* Finished examining slot - no longer busy - unlock on multicore */
1505 UNLOCK_THREAD(t);
1508     IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
1509                                         not accessible to them yet */
1510 return thread;
1513 /*---------------------------------------------------------------------------
1514 * Return the thread_entry pointer for a thread_id. Return the current
1515 * thread if the ID is (unsigned int)-1 (alias for current).
1516 *---------------------------------------------------------------------------
1518 struct thread_entry * thread_id_entry(unsigned int thread_id)
1520 return (thread_id == THREAD_ID_CURRENT) ?
1521 cores[CURRENT_CORE].running :
1522 &threads[thread_id & THREAD_ID_SLOT_MASK];
1525 /*---------------------------------------------------------------------------
1526 * Place the current core in idle mode - woken up on interrupt or wake
1527 * request from another core.
1528 *---------------------------------------------------------------------------
1530 void core_idle(void)
1532 IF_COP( const unsigned int core = CURRENT_CORE; )
1533 disable_irq();
1534 core_sleep(IF_COP(core));
1537 /*---------------------------------------------------------------------------
1538 * Create a thread. If using a dual core architecture, specify which core to
1539 * start the thread on.
1541  * Return ID if context area could be allocated, else 0.
1542 *---------------------------------------------------------------------------
1544 unsigned int create_thread(void (*function)(void),
1545 void* stack, size_t stack_size,
1546 unsigned flags, const char *name
1547 IF_PRIO(, int priority)
1548 IF_COP(, unsigned int core))
1550 unsigned int i;
1551 unsigned int stack_words;
1552 uintptr_t stackptr, stackend;
1553 struct thread_entry *thread;
1554 unsigned state;
1555 int oldlevel;
1557 thread = find_empty_thread_slot();
1558 if (thread == NULL)
1560 return 0;
1563 oldlevel = disable_irq_save();
1565 /* Munge the stack to make it easy to spot stack overflows */
1566 stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
1567 stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
1568 stack_size = stackend - stackptr;
1569 stack_words = stack_size / sizeof (uintptr_t);
1571 for (i = 0; i < stack_words; i++)
1573 ((uintptr_t *)stackptr)[i] = DEADBEEF;
1576 /* Store interesting information */
1577 thread->name = name;
1578 thread->stack = (uintptr_t *)stackptr;
1579 thread->stack_size = stack_size;
1580 thread->queue = NULL;
1581 #ifdef HAVE_WAKEUP_EXT_CB
1582 thread->wakeup_ext_cb = NULL;
1583 #endif
1584 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1585 thread->cpu_boost = 0;
1586 #endif
1587 #ifdef HAVE_PRIORITY_SCHEDULING
1588 memset(&thread->pdist, 0, sizeof(thread->pdist));
1589 thread->blocker = NULL;
1590 thread->base_priority = priority;
1591 thread->priority = priority;
1592 thread->skip_count = priority;
1593 prio_add_entry(&thread->pdist, priority);
1594 #endif
1596 #ifdef HAVE_IO_PRIORITY
1597 /* Default to high (foreground) priority */
1598 thread->io_priority = IO_PRIORITY_IMMEDIATE;
1599 #endif
1601 #if NUM_CORES > 1
1602 thread->core = core;
1604 /* Writeback stack munging or anything else before starting */
1605 if (core != CURRENT_CORE)
1607 cpucache_flush();
1609 #endif
1611 /* Thread is not on any timeout list but be a bit paranoid */
1612 thread->tmo.prev = NULL;
1614 state = (flags & CREATE_THREAD_FROZEN) ?
1615 STATE_FROZEN : STATE_RUNNING;
1617 thread->context.sp = (typeof (thread->context.sp))stackend;
1619 /* Load the thread's context structure with needed startup information */
1620 THREAD_STARTUP_INIT(core, thread, function);
1622 thread->state = state;
1623 i = thread->id; /* Snapshot while locked */
1625 if (state == STATE_RUNNING)
1626 core_schedule_wakeup(thread);
1628 UNLOCK_THREAD(thread);
1629 restore_irq(oldlevel);
1631 return i;
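/* Typical call (illustrative; the stack, thread name and priority shown are
 * placeholders, with DEFAULT_STACK_SIZE and PRIORITY_BACKGROUND assumed to
 * come from the usual config/thread headers):
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *     unsigned int id = create_thread(demo_thread, demo_stack,
 *                                     sizeof(demo_stack), 0, "demo"
 *                                     IF_PRIO(, PRIORITY_BACKGROUND)
 *                                     IF_COP(, CPU));
 *
 * Passing CREATE_THREAD_FROZEN in flags leaves the new thread in STATE_FROZEN
 * until it is explicitly thawed. */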
1634 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1635 /*---------------------------------------------------------------------------
1636 * Change the boost state of a thread boosting or unboosting the CPU
1637 * as required.
1638 *---------------------------------------------------------------------------
1640 static inline void boost_thread(struct thread_entry *thread, bool boost)
1642 if ((thread->cpu_boost != 0) != boost)
1644 thread->cpu_boost = boost;
1645 cpu_boost(boost);
1649 void trigger_cpu_boost(void)
1651 struct thread_entry *current = cores[CURRENT_CORE].running;
1652 boost_thread(current, true);
1655 void cancel_cpu_boost(void)
1657 struct thread_entry *current = cores[CURRENT_CORE].running;
1658 boost_thread(current, false);
1660 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
1662 /*---------------------------------------------------------------------------
1663 * Block the current thread until another thread terminates. A thread may
1664  * wait on itself to terminate, which prevents it from running again, and it
1665 * will need to be killed externally.
1666 * Parameter is the ID as returned from create_thread().
1667 *---------------------------------------------------------------------------
1669 void thread_wait(unsigned int thread_id)
1671 struct thread_entry *current = cores[CURRENT_CORE].running;
1672 struct thread_entry *thread = thread_id_entry(thread_id);
1674 /* Lock thread-as-waitable-object lock */
1675 corelock_lock(&thread->waiter_cl);
1677 /* Be sure it hasn't been killed yet */
1678 if (thread_id == THREAD_ID_CURRENT ||
1679 (thread->id == thread_id && thread->state != STATE_KILLED))
1681 IF_COP( current->obj_cl = &thread->waiter_cl; )
1682 current->bqp = &thread->queue;
1684 disable_irq();
1685 block_thread(current);
1687 corelock_unlock(&thread->waiter_cl);
1689 switch_thread();
1690 return;
1693 corelock_unlock(&thread->waiter_cl);
1696 /*---------------------------------------------------------------------------
1697 * Exit the current thread. The Right Way to Do Things (TM).
1698 *---------------------------------------------------------------------------
1700 /* This is done to foil optimizations that may require the current stack,
1701 * such as optimizing subexpressions that put variables on the stack that
1702 * get used after switching stacks. */
1703 #if NUM_CORES > 1
1704 /* Called by ASM stub */
1705 static void thread_final_exit_do(struct thread_entry *current)
1706 #else
1707 /* No special procedure is required before calling */
1708 static inline void thread_final_exit(struct thread_entry *current)
1709 #endif
1711 /* At this point, this thread isn't using resources allocated for
1712 * execution except the slot itself. */
1714 /* Signal this thread */
1715 thread_queue_wake(&current->queue);
1716 corelock_unlock(&current->waiter_cl);
1717 switch_thread();
1718 /* This should never and must never be reached - if it is, the
1719 * state is corrupted */
1720 THREAD_PANICF("thread_exit->K:*R", current);
1721 while (1);
1724 void thread_exit(void)
1726 register struct thread_entry * current = cores[CURRENT_CORE].running;
1728 /* Cancel CPU boost if any */
1729 cancel_cpu_boost();
1731 disable_irq();
1733 corelock_lock(&current->waiter_cl);
1734 LOCK_THREAD(current);
1736 #if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
1737 if (current->name == THREAD_DESTRUCT)
1739 /* Thread being killed - become a waiter */
1740 unsigned int id = current->id;
1741 UNLOCK_THREAD(current);
1742 corelock_unlock(&current->waiter_cl);
1743 thread_wait(id);
1744 THREAD_PANICF("thread_exit->WK:*R", current);
1746 #endif
1748 #ifdef HAVE_PRIORITY_SCHEDULING
1749 check_for_obj_waiters("thread_exit", current);
1750 #endif
1752 if (current->tmo.prev != NULL)
1754 /* Cancel pending timeout list removal */
1755 remove_from_list_tmo(current);
1758 /* Switch tasks and never return */
1759 block_thread_on_l(current, STATE_KILLED);
1761 /* Slot must be unusable until thread is really gone */
1762 UNLOCK_THREAD_AT_TASK_SWITCH(current);
1764 /* Update ID for this slot */
1765 new_thread_id(current->id, current);
1766 current->name = NULL;
1768 /* Do final cleanup and remove the thread */
1769 thread_final_exit(current);
1772 #ifdef ALLOW_REMOVE_THREAD
1773 /*---------------------------------------------------------------------------
1774 * Remove a thread from the scheduler. Not The Right Way to Do Things in
1775 * normal programs.
1777 * Parameter is the ID as returned from create_thread().
1779 * Use with care on threads that are not under careful control as this may
1780 * leave various objects in an undefined state.
1781 *---------------------------------------------------------------------------
1783 void remove_thread(unsigned int thread_id)
1785 #ifdef HAVE_CORELOCK_OBJECT
1786 /* core is not constant here because of core switching */
1787 unsigned int core = CURRENT_CORE;
1788 unsigned int old_core = NUM_CORES;
1789 struct corelock *ocl = NULL;
1790 #else
1791 const unsigned int core = CURRENT_CORE;
1792 #endif
1793 struct thread_entry *current = cores[core].running;
1794 struct thread_entry *thread = thread_id_entry(thread_id);
1796 unsigned state;
1797 int oldlevel;
1799 if (thread == current)
1800 thread_exit(); /* Current thread - do normal exit */
1802 oldlevel = disable_irq_save();
1804 corelock_lock(&thread->waiter_cl);
1805 LOCK_THREAD(thread);
1807 state = thread->state;
1809 if (thread->id != thread_id || state == STATE_KILLED)
1810 goto thread_killed;
1812 #if NUM_CORES > 1
1813 if (thread->name == THREAD_DESTRUCT)
1815 /* Thread being killed - become a waiter */
1816 UNLOCK_THREAD(thread);
1817 corelock_unlock(&thread->waiter_cl);
1818 restore_irq(oldlevel);
1819 thread_wait(thread_id);
1820 return;
1823 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
1825 #ifdef HAVE_PRIORITY_SCHEDULING
1826 check_for_obj_waiters("remove_thread", thread);
1827 #endif
1829 if (thread->core != core)
1831 /* Switch cores and safely extract the thread there */
1832 /* Slot HAS to be unlocked or a deadlock could occur which means other
1833 * threads have to be guided into becoming thread waiters if they
1834 * attempt to remove it. */
1835 unsigned int new_core = thread->core;
1837 corelock_unlock(&thread->waiter_cl);
1839 UNLOCK_THREAD(thread);
1840 restore_irq(oldlevel);
1842 old_core = switch_core(new_core);
1844 oldlevel = disable_irq_save();
1846 corelock_lock(&thread->waiter_cl);
1847 LOCK_THREAD(thread);
1849 state = thread->state;
1850 core = new_core;
1851 /* Perform the extraction and switch ourselves back to the original
1852 processor */
1854 #endif /* NUM_CORES > 1 */
1856 if (thread->tmo.prev != NULL)
1858 /* Clean thread off the timeout list if a timeout check hasn't
1859 * run yet */
1860 remove_from_list_tmo(thread);
1863 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1864 /* Cancel CPU boost if any */
1865 boost_thread(thread, false);
1866 #endif
1868 IF_COP( retry_state: )
1870 switch (state)
1872 case STATE_RUNNING:
1873 RTR_LOCK(core);
1874 /* Remove thread from ready to run tasks */
1875 remove_from_list_l(&cores[core].running, thread);
1876 rtr_subtract_entry(core, thread->priority);
1877 RTR_UNLOCK(core);
1878 break;
1879 case STATE_BLOCKED:
1880 case STATE_BLOCKED_W_TMO:
1881 /* Remove thread from the queue it's blocked on - including its
1882 * own if waiting there */
1883 #if NUM_CORES > 1
1884 if (&thread->waiter_cl != thread->obj_cl)
1886 ocl = thread->obj_cl;
1888 if (UNLIKELY(corelock_try_lock(ocl) == 0))
1890 UNLOCK_THREAD(thread);
1891 corelock_lock(ocl);
1892 LOCK_THREAD(thread);
1894 if (UNLIKELY(thread->state != state))
1896 /* Something woke the thread */
1897 state = thread->state;
1898 corelock_unlock(ocl);
1899 goto retry_state;
1903 #endif
1904 remove_from_list_l(thread->bqp, thread);
1906 #ifdef HAVE_WAKEUP_EXT_CB
1907 if (thread->wakeup_ext_cb != NULL)
1908 thread->wakeup_ext_cb(thread);
1909 #endif
1911 #ifdef HAVE_PRIORITY_SCHEDULING
1912 if (thread->blocker != NULL)
1914 /* Remove thread's priority influence from its chain */
1915 wakeup_priority_protocol_release(thread);
1917 #endif
1919 #if NUM_CORES > 1
1920 if (ocl != NULL)
1921 corelock_unlock(ocl);
1922 #endif
1923 break;
1924 /* Otherwise thread is frozen and hasn't run yet */
1927 new_thread_id(thread_id, thread);
1928 thread->state = STATE_KILLED;
1930 /* If thread was waiting on itself, it will have been removed above.
1931 * The wrong order would result in waking the thread first and deadlocking
1932 * since the slot is already locked. */
1933 thread_queue_wake(&thread->queue);
1935 thread->name = NULL;
1937 thread_killed: /* Thread was already killed */
1938 /* Removal complete - safe to unlock and reenable interrupts */
1939 corelock_unlock(&thread->waiter_cl);
1940 UNLOCK_THREAD(thread);
1941 restore_irq(oldlevel);
1943 #if NUM_CORES > 1
1944 if (old_core < NUM_CORES)
1946 /* Did a removal on another processor's thread - switch back to
1947 native core */
1948 switch_core(old_core);
1950 #endif
1952 #endif /* ALLOW_REMOVE_THREAD */
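/* A rough usage sketch for remove_thread() (only built with
 * ALLOW_REMOVE_THREAD): forcibly reclaiming a worker whose entry point never
 * returns on its own. worker_main, worker_stack and worker_demo are
 * hypothetical names, and the create_thread() argument list is assumed to
 * match the declaration in thread.h; letting the thread call thread_exit()
 * itself is always the safer route. */
#if 0
static long worker_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void worker_main(void)
{
    for (;;)
        sleep(HZ);               /* never exits on its own */
}

static void worker_demo(void)
{
    unsigned int id = create_thread(worker_main, worker_stack,
                                    sizeof(worker_stack), 0, "worker"
                                    IF_PRIO(, PRIORITY_USER_INTERFACE)
                                    IF_COP(, CPU));
    sleep(10*HZ);                /* let it run for a while */
    remove_thread(id);           /* reclaims the slot; anything the thread
                                    held may be left in an undefined state */
}
#endif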
1954 #ifdef HAVE_PRIORITY_SCHEDULING
1955 /*---------------------------------------------------------------------------
1956 * Sets the thread's relative base priority for the core it runs on. Any
1957 * needed priority inheritance changes are also propagated down the chain.
1958 *---------------------------------------------------------------------------
1960 int thread_set_priority(unsigned int thread_id, int priority)
1962 int old_base_priority = -1;
1963 struct thread_entry *thread = thread_id_entry(thread_id);
1965 /* A little safety measure */
1966 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
1967 return -1;
1969 /* Thread could be on any list and therefore on an interrupt accessible
1970 one - disable interrupts */
1971 int oldlevel = disable_irq_save();
1973 LOCK_THREAD(thread);
1975 /* Make sure it's not killed */
1976 if (thread_id == THREAD_ID_CURRENT ||
1977 (thread->id == thread_id && thread->state != STATE_KILLED))
1979 int old_priority = thread->priority;
1981 old_base_priority = thread->base_priority;
1982 thread->base_priority = priority;
1984 prio_move_entry(&thread->pdist, old_base_priority, priority);
1985 priority = find_first_set_bit(thread->pdist.mask);
1987 if (old_priority == priority)
1989 /* No priority change - do nothing */
1991 else if (thread->state == STATE_RUNNING)
1993 /* This thread is running - change location on the run
1994 * queue. No transitive inheritance needed. */
1995 set_running_thread_priority(thread, priority);
1997 else
1999 thread->priority = priority;
2001 if (thread->blocker != NULL)
2003 /* Bubble new priority down the chain */
2004 struct blocker *bl = thread->blocker; /* Blocker struct */
2005 struct thread_entry *bl_t = bl->thread; /* Blocking thread */
2006 struct thread_entry * const tstart = thread; /* Initial thread */
2007 const int highest = MIN(priority, old_priority); /* Higher of new or old (lower number) */
2009 for (;;)
2011 struct thread_entry *next; /* Next thread to check */
2012 int bl_pr; /* Current highest priority blocked on the object */
2013 int queue_pr; /* New highest priority blocked on the object */
2014 #if NUM_CORES > 1
2015 /* The owner can change but the thread cannot be dislodged - the thread
2016 * may not be first in the queue, which allows other
2017 * threads ahead of it in the list to be given ownership during the
2018 * operation. If the thread is next, the waker will have to
2019 * wait for us and the owner of the object will remain fixed.
2020 * If we successfully grab the owner -- which at some point
2021 * is guaranteed -- then the queue remains fixed until we
2022 * pass by. */
2023 for (;;)
2025 LOCK_THREAD(bl_t);
2027 /* Double-check the owner - retry if it changed */
2028 if (LIKELY(bl->thread == bl_t))
2029 break;
2031 UNLOCK_THREAD(bl_t);
2032 bl_t = bl->thread;
2034 #endif
2035 bl_pr = bl->priority;
2037 if (highest > bl_pr)
2038 break; /* Object priority won't change */
2040 /* This will include the thread being set */
2041 queue_pr = find_highest_priority_in_list_l(*thread->bqp);
2043 if (queue_pr == bl_pr)
2044 break; /* Object priority not changing */
2046 /* Update thread boost for this object */
2047 bl->priority = queue_pr;
2048 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
2049 bl_pr = find_first_set_bit(bl_t->pdist.mask);
2051 if (bl_t->priority == bl_pr)
2052 break; /* Blocking thread priority not changing */
2054 if (bl_t->state == STATE_RUNNING)
2056 /* Thread not blocked - we're done */
2057 set_running_thread_priority(bl_t, bl_pr);
2058 break;
2061 bl_t->priority = bl_pr;
2062 bl = bl_t->blocker; /* Blocking thread has a blocker? */
2064 if (bl == NULL)
2065 break; /* End of chain */
2067 next = bl->thread;
2069 if (UNLIKELY(next == tstart))
2070 break; /* Full-circle */
2072 UNLOCK_THREAD(thread);
2074 thread = bl_t;
2075 bl_t = next;
2076 } /* for (;;) */
2078 UNLOCK_THREAD(bl_t);
2083 UNLOCK_THREAD(thread);
2085 restore_irq(oldlevel);
2087 return old_base_priority;
2090 /*---------------------------------------------------------------------------
2091 * Returns the current base priority for a thread.
2092 *---------------------------------------------------------------------------
2094 int thread_get_priority(unsigned int thread_id)
2096 struct thread_entry *thread = thread_id_entry(thread_id);
2097 int base_priority = thread->base_priority;
2099 /* Simply check without locking the slot. It may or may not still be valid
2100 * by the time the function returns anyway. If all tests pass, it was the
2101 * correct value while the slot was still valid. */
2102 if (thread_id != THREAD_ID_CURRENT &&
2103 (thread->id != thread_id || thread->state == STATE_KILLED))
2104 base_priority = -1;
2106 return base_priority;
2108 #endif /* HAVE_PRIORITY_SCHEDULING */
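/* A minimal sketch of the save/boost/restore pattern thread_set_priority()
 * enables (HAVE_PRIORITY_SCHEDULING builds only). Lower numeric values mean
 * higher priority, per the HIGHEST_PRIORITY/LOWEST_PRIORITY range check
 * above; do_time_critical_work() is a hypothetical placeholder. */
#if 0
static void boosted_section(void)
{
    /* Raise our own base priority one step; the previous base priority is
       returned so it can be restored afterwards. */
    int old = thread_set_priority(THREAD_ID_CURRENT,
                                  PRIORITY_USER_INTERFACE - 1);

    do_time_critical_work();

    if (old >= 0)                /* -1 means the request was rejected */
        thread_set_priority(THREAD_ID_CURRENT, old);
}
#endif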
2110 #ifdef HAVE_IO_PRIORITY
2111 int thread_get_io_priority(unsigned int thread_id)
2113 struct thread_entry *thread = thread_id_entry(thread_id);
2114 return thread->io_priority;
2117 void thread_set_io_priority(unsigned int thread_id,int io_priority)
2119 struct thread_entry *thread = thread_id_entry(thread_id);
2120 thread->io_priority = io_priority;
2122 #endif
2124 /*---------------------------------------------------------------------------
2125 * Starts a frozen thread - similar semantics to wakeup_thread except that
2126 * the thread is not on any scheduler or wakeup queue at all. It exists
2127 * simply by virtue of the slot having a state of STATE_FROZEN.
2128 *---------------------------------------------------------------------------
2130 void thread_thaw(unsigned int thread_id)
2132 struct thread_entry *thread = thread_id_entry(thread_id);
2133 int oldlevel = disable_irq_save();
2135 LOCK_THREAD(thread);
2137 /* If the thread is the current one, it cannot be frozen, therefore
2138 * there is no need to check for that. */
2139 if (thread->id == thread_id && thread->state == STATE_FROZEN)
2140 core_schedule_wakeup(thread);
2142 UNLOCK_THREAD(thread);
2143 restore_irq(oldlevel);
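/* Typical pairing of thread_thaw() with a frozen creation: the thread is
 * created with CREATE_THREAD_FROZEN so it cannot run before its data is set
 * up, then released here. The create_thread()/CREATE_THREAD_FROZEN
 * declarations are assumed from thread.h; codec_main and codec_stack are
 * hypothetical. */
#if 0
static void start_codec_thread(void)
{
    unsigned int id = create_thread(codec_main, codec_stack,
                                    sizeof(codec_stack),
                                    CREATE_THREAD_FROZEN, "codec"
                                    IF_PRIO(, PRIORITY_USER_INTERFACE)
                                    IF_COP(, CPU));

    /* ... publish whatever codec_main expects to find ... */

    thread_thaw(id);             /* STATE_FROZEN -> scheduled to run */
}
#endif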
2146 /*---------------------------------------------------------------------------
2147 * Return the ID of the currently executing thread.
2148 *---------------------------------------------------------------------------
2150 unsigned int thread_get_current(void)
2152 return cores[CURRENT_CORE].running->id;
2155 #if NUM_CORES > 1
2156 /*---------------------------------------------------------------------------
2157 * Switch the processor that the currently executing thread runs on.
2158 *---------------------------------------------------------------------------
2160 unsigned int switch_core(unsigned int new_core)
2162 const unsigned int core = CURRENT_CORE;
2163 struct thread_entry *current = cores[core].running;
2165 if (core == new_core)
2167 /* No change - just return same core */
2168 return core;
2171 int oldlevel = disable_irq_save();
2172 LOCK_THREAD(current);
2174 if (current->name == THREAD_DESTRUCT)
2176 /* Thread being killed - deactivate and let process complete */
2177 unsigned int id = current->id;
2178 UNLOCK_THREAD(current);
2179 restore_irq(oldlevel);
2180 thread_wait(id);
2181 /* Should never be reached */
2182 THREAD_PANICF("switch_core->D:*R", current);
2185 /* Get us off the running list for the current core */
2186 RTR_LOCK(core);
2187 remove_from_list_l(&cores[core].running, current);
2188 rtr_subtract_entry(core, current->priority);
2189 RTR_UNLOCK(core);
2191 /* Stash return value (old core) in a safe place */
2192 current->retval = core;
2194 /* If a timeout hasn't yet been cleaned up it must be removed now or
2195 * the other core will likely attempt a removal from the wrong list! */
2196 if (current->tmo.prev != NULL)
2198 remove_from_list_tmo(current);
2201 /* Change the core number for this thread slot */
2202 current->core = new_core;
2204 /* Do not use core_schedule_wakeup here since this will result in
2205 * the thread starting to run on the other core before being finished on
2206 * this one. Delay the list unlock to keep the other core stuck
2207 * until this thread is ready. */
2208 RTR_LOCK(new_core);
2210 rtr_add_entry(new_core, current->priority);
2211 add_to_list_l(&cores[new_core].running, current);
2213 /* Make a callback into device-specific code, unlock the wakeup list so
2214 * that execution may resume on the new core, unlock our slot and finally
2215 * restore the interrupt level */
2216 cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
2217 cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
2218 cores[core].block_task = current;
2220 UNLOCK_THREAD(current);
2222 /* Alert other core to activity */
2223 core_wake(new_core);
2225 /* Do the stack switching, cache maintenance and switch_thread call -
2226 requires native code */
2227 switch_thread_core(core, current);
2229 /* Finally return the old core to caller */
2230 return current->retval;
2232 #endif /* NUM_CORES > 1 */
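/* Sketch of a round trip through switch_core() on a dual-core build: hop to
 * the coprocessor, do some work there, then migrate back. COP is the usual
 * name of the second core on PP targets; do_cop_work() is hypothetical. */
#if 0
static void run_on_cop(void)
{
    unsigned int old_core = switch_core(COP); /* returns the core we left */
    do_cop_work();                            /* executes on COP */
    switch_core(old_core);                    /* migrate back home */
}
#endif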
2234 /*---------------------------------------------------------------------------
2235 * Initialize threading API. This assumes interrupts are not yet enabled. On
2236 * multicore setups, no core is allowed to proceed until create_thread calls
2237 * are safe to perform.
2238 *---------------------------------------------------------------------------
2240 void init_threads(void)
2242 const unsigned int core = CURRENT_CORE;
2243 struct thread_entry *thread;
2245 if (core == CPU)
2247 /* Initialize core locks and IDs in all slots */
2248 int n;
2249 for (n = 0; n < MAXTHREADS; n++)
2251 thread = &threads[n];
2252 corelock_init(&thread->waiter_cl);
2253 corelock_init(&thread->slot_cl);
2254 thread->id = THREAD_ID_INIT(n);
2258 /* CPU will initialize first and then sleep */
2259 thread = find_empty_thread_slot();
2261 if (thread == NULL)
2263 /* WTF? There really must be a slot available at this stage.
2264 * This can fail if, for example, .bss isn't zeroed out by the loader
2265 * or the threads array is placed in the wrong section. */
2266 THREAD_PANICF("init_threads->no slot", NULL);
2269 /* Initialize initially non-zero members of core */
2270 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2272 /* Initialize initially non-zero members of slot */
2273 UNLOCK_THREAD(thread); /* No sync worries yet */
2274 thread->name = main_thread_name;
2275 thread->state = STATE_RUNNING;
2276 IF_COP( thread->core = core; )
2277 #ifdef HAVE_PRIORITY_SCHEDULING
2278 corelock_init(&cores[core].rtr_cl);
2279 thread->base_priority = PRIORITY_USER_INTERFACE;
2280 prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
2281 thread->priority = PRIORITY_USER_INTERFACE;
2282 rtr_add_entry(core, PRIORITY_USER_INTERFACE);
2283 #endif
2285 add_to_list_l(&cores[core].running, thread);
2287 if (core == CPU)
2289 thread->stack = stackbegin;
2290 thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
2291 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
2292 /* Wait for other processors to finish their inits since create_thread
2293 * isn't safe to call until the kernel inits are done. The first
2294 * threads created in the system must of course be created by CPU.
2295 * Another possible approach is to have CPU initialize all cores and
2296 * slots for each core, let the remainder proceed in parallel, and
2297 * signal CPU when all are finished. */
2298 core_thread_init(CPU);
2300 else
2302 /* Initial stack is the idle stack */
2303 thread->stack = idle_stacks[core];
2304 thread->stack_size = IDLE_STACK_SIZE;
2305 /* After the last processor completes, it should signal all others to
2306 * proceed, or may signal the next one and call thread_exit(). The last
2307 * one to finish will signal CPU. */
2308 core_thread_init(core);
2309 /* Other cores do not have a main thread - go idle inside switch_thread
2310 * until a thread can run on the core. */
2311 thread_exit();
2312 #endif /* NUM_CORES */
2314 #ifdef INIT_MAIN_THREAD
2315 init_main_thread(&thread->context);
2316 #endif
2319 /* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
2320 #if NUM_CORES == 1
2321 static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
2322 #else
2323 static int stack_usage(uintptr_t *stackptr, size_t stack_size)
2324 #endif
2326 unsigned int stack_words = stack_size / sizeof (uintptr_t);
2327 unsigned int i;
2328 int usage = 0;
2330 for (i = 0; i < stack_words; i++)
2332 if (stackptr[i] != DEADBEEF)
2334 usage = ((stack_words - i) * 100) / stack_words;
2335 break;
2339 return usage;
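/* The scan relies on thread stacks being pre-filled with the DEADBEEF
 * pattern when they are set up: since the stack grows downward, the first
 * word from the low end that no longer reads DEADBEEF marks the deepest the
 * stack ever reached. A rough worked example, assuming a 256-word stack
 * whose words 0..199 are still DEADBEEF and word 200 has been overwritten:
 *
 *     usage = ((256 - 200) * 100) / 256   ->   21 (percent)
 */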
2342 /*---------------------------------------------------------------------------
2343 * Returns the maximum percentage of stack a thread ever used while running.
2344 * NOTE: Some large buffer allocations that don't use enough of the buffer to
2345 * overwrite stackptr[0] will not be seen.
2346 *---------------------------------------------------------------------------
2348 int thread_stack_usage(const struct thread_entry *thread)
2350 if (LIKELY(thread->stack_size > 0))
2351 return stack_usage(thread->stack, thread->stack_size);
2352 return 0;
2355 #if NUM_CORES > 1
2356 /*---------------------------------------------------------------------------
2357 * Returns the maximum percentage of the core's idle stack ever used during
2358 * runtime.
2359 *---------------------------------------------------------------------------
2361 int idle_stack_usage(unsigned int core)
2363 return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
2365 #endif
2367 /*---------------------------------------------------------------------------
2368 * Fills in the buffer with the specified thread's name. If the name is NULL,
2369 * empty, or the thread is in destruct state, a formatted ID is written
2370 * instead.
2371 *---------------------------------------------------------------------------
2373 void thread_get_name(char *buffer, int size,
2374 struct thread_entry *thread)
2376 if (size <= 0)
2377 return;
2379 *buffer = '\0';
2381 if (thread)
2383 /* Display thread name if one or ID if none */
2384 const char *name = thread->name;
2385 const char *fmt = "%s";
2386 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
2388 name = (const char *)(uintptr_t)thread->id;
2389 fmt = "%04lX";
2391 snprintf(buffer, size, fmt, name);
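/* Sketch of a debug-style dump combining thread_get_name() with
 * thread_stack_usage(), walking the global slot array. Skipping unused
 * slots via STATE_KILLED and printing with printf() are assumptions made
 * for illustration only. */
#if 0
static void dump_thread_stacks(void)
{
    char namebuf[32];
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        struct thread_entry *t = &threads[i];

        if (t->state == STATE_KILLED)
            continue;            /* slot not in use */

        thread_get_name(namebuf, sizeof(namebuf), t);
        printf("%-16s stack %3d%%\n", namebuf, thread_stack_usage(t));
    }
}
#endif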