1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Ulf Ralberg
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
21 #include "config.h"
22 #include <stdbool.h>
23 #include <stdio.h>
24 #include "thread.h"
25 #include "panic.h"
26 #include "system.h"
27 #include "kernel.h"
28 #include "cpu.h"
29 #include "string.h"
30 #include "buffer.h"
31 #ifdef RB_PROFILE
32 #include <profile.h>
33 #endif
34 /****************************************************************************
35 * ATTENTION!! *
36 * See notes below on implementing processor-specific portions! *
37 ***************************************************************************/
39 /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
40 #ifdef DEBUG
41 #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
42 #else
43 #define THREAD_EXTRA_CHECKS 0
44 #endif
46 /**
47 * General locking order to guarantee progress. Order must be observed but
48 * all stages are not necessarily obligatory. Going from 1) to 3) is
49 * perfectly legal. (An illustrative sketch follows this comment.)
51 * 1) IRQ
52 * This is first because of the likelihood of having an interrupt occur that
53 * also accesses one of the objects farther down the list. Any non-blocking
54 * synchronization done may already have a lock on something during normal
55 * execution and if an interrupt handler running on the same processor as
56 * the one that has the resource locked were to attempt to access the
57 * resource, the interrupt handler would spin forever waiting for an unlock
58 * that will never happen. There is no danger if the interrupt occurs on
59 * a different processor because the one that has the lock will eventually
60 * unlock and the other processor's handler may proceed at that time. Not
61 * necessary when the resource in question is definitely not available to
62 * interrupt handlers.
64 * 2) Kernel Object
65 * Stage 1) may be needed beforehand if the kernel object allows dual-use such as
66 * event queues. The kernel object must have a scheme to protect itself from
67 * access by another processor and is responsible for serializing the calls
68 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
69 * other. Objects' queues are also protected here.
71 * 3) Thread Slot
72 * This locks access to the thread's slot such that its state cannot be
73 * altered by another processor when a state change is in progress such as
74 * when it is in the process of going on a blocked list. An attempt to wake
75 * a thread while it is still blocking will likely desync its state with
76 * the other resources used for that state.
78 * 4) Core Lists
79 * These lists are specific to a particular processor core and are accessible
80 * by all processor cores and interrupt handlers. The running (rtr) list is
81 * the prime example where a thread may be added by any means.
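 *
 * A minimal sketch of the full order above (the object "obj" and its fields
 * are hypothetical; the locking calls themselves are the ones used below):
 *
 *     disable_irq();              <- 1) IRQ, if IRQ handlers can reach the object
 *     corelock_lock(&obj->cl);    <- 2) kernel object
 *     LOCK_THREAD(thread);        <- 3) thread slot
 *     RTR_LOCK(core);             <- 4) core run list
 *     ... state changes ...
 *     RTR_UNLOCK(core);
 *     UNLOCK_THREAD(thread);
 *     corelock_unlock(&obj->cl);
 *     enable_irq();
 */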
84 /*---------------------------------------------------------------------------
85 * Processor specific: core_sleep/core_wake/misc. notes
87 * ARM notes:
88 * FIQ is not dealt with by the scheduler code and is simply restored if it
89 * must by masked for some reason - because threading modifies a register
90 * that FIQ may also modify and there's no way to accomplish it atomically.
91 * s3c2440 is such a case.
93 * Audio interrupts are generally treated at a higher priority than others.
94 * Usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
95 * is not in general safe. Special cases may be constructed on a per-
96 * source basis and blocking operations are not available.
98 * core_sleep procedure to implement for any CPU to ensure an asynchronous
99 * wakeup never results in requiring a wait until the next tick (up to
100 * 10000uS!). May require assembly and careful instruction ordering. (A pseudo-C sketch follows this comment.)
102 * 1) On multicore, stay awake if directed to do so by another. If so, goto
103 * step 4.
104 * 2) If processor requires, atomically reenable interrupts and perform step
105 * 3.
106 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
107 * on Coldfire) goto step 5.
108 * 4) Enable interrupts.
109 * 5) Exit procedure.
111 * core_wake and multiprocessor notes for sleep/wake coordination:
112 * If possible, to wake up another processor, the forcing of an interrupt on
113 * the woken core by the waker core is the easiest way to ensure a non-
114 * delayed wake and immediate execution of any woken threads. If that isn't
115 * available then some careful non-blocking synchronization is needed (as on
116 * PP targets at the moment).
117 *---------------------------------------------------------------------------
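 */

/* A pseudo-C sketch of the core_sleep() steps above (helper names here are
 * hypothetical; the real implementations live in the processor-specific
 * thread-*.c files included further down and are usually assembly):
 *
 *     static inline void core_sleep(IF_COP_VOID(unsigned int core))
 *     {
 *         if (!wake_pending(core))     <- 1) multicore: stay awake if asked
 *             sleep_core_irq_on();     <- 2)+3) atomically unmask IRQs and sleep
 *         enable_irq();                <- 4) no-op if the wakeup already did it
 *     }                                <- 5) exit
 */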
120 /* Cast to the machine pointer size, whose size could be < 4 or > 32
121 * (someday :). */
122 #define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
123 static struct core_entry cores[NUM_CORES] IBSS_ATTR;
124 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
126 static const char main_thread_name[] = "main";
127 #if (CONFIG_PLATFORM & PLATFORM_NATIVE)
128 extern uintptr_t stackbegin[];
129 extern uintptr_t stackend[];
130 #else
131 extern uintptr_t *stackbegin;
132 extern uintptr_t *stackend;
133 #endif
135 static inline void core_sleep(IF_COP_VOID(unsigned int core))
136 __attribute__((always_inline));
138 void check_tmo_threads(void)
139 __attribute__((noinline));
141 static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
142 __attribute__((always_inline));
144 static void add_to_list_tmo(struct thread_entry *thread)
145 __attribute__((noinline));
147 static void core_schedule_wakeup(struct thread_entry *thread)
148 __attribute__((noinline));
150 #if NUM_CORES > 1
151 static inline void run_blocking_ops(
152 unsigned int core, struct thread_entry *thread)
153 __attribute__((always_inline));
154 #endif
156 static void thread_stkov(struct thread_entry *thread)
157 __attribute__((noinline));
159 static inline void store_context(void* addr)
160 __attribute__((always_inline));
162 static inline void load_context(const void* addr)
163 __attribute__((always_inline));
165 #if NUM_CORES > 1
166 static void thread_final_exit_do(struct thread_entry *current)
167 __attribute__((noinline, noreturn, used));
168 #else
169 static inline void thread_final_exit(struct thread_entry *current)
170 __attribute__((always_inline, noreturn));
171 #endif
173 void switch_thread(void)
174 __attribute__((noinline));
176 /****************************************************************************
177 * Processor-specific section - include necessary core support
179 #if defined(ANDROID)
180 #include "thread-android-arm.c"
181 #elif defined(CPU_ARM)
182 #include "thread-arm.c"
183 #if defined (CPU_PP)
184 #include "thread-pp.c"
185 #endif /* CPU_PP */
186 #elif defined(CPU_COLDFIRE)
187 #include "thread-coldfire.c"
188 #elif CONFIG_CPU == SH7034
189 #include "thread-sh.c"
190 #elif defined(CPU_MIPS) && CPU_MIPS == 32
191 #include "thread-mips32.c"
192 #else
193 /* Wouldn't compile anyway */
194 #error Processor not implemented.
195 #endif /* CONFIG_CPU == */
197 #ifndef IF_NO_SKIP_YIELD
198 #define IF_NO_SKIP_YIELD(...)
199 #endif
202 * End Processor-specific section
203 ***************************************************************************/
205 #if THREAD_EXTRA_CHECKS
206 static void thread_panicf(const char *msg, struct thread_entry *thread)
208 IF_COP( const unsigned int core = thread->core; )
209 static char name[32];
210 thread_get_name(name, 32, thread);
211 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
213 static void thread_stkov(struct thread_entry *thread)
215 thread_panicf("Stkov", thread);
217 #define THREAD_PANICF(msg, thread) \
218 thread_panicf(msg, thread)
219 #define THREAD_ASSERT(exp, msg, thread) \
220 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
221 #else
222 static void thread_stkov(struct thread_entry *thread)
224 IF_COP( const unsigned int core = thread->core; )
225 static char name[32];
226 thread_get_name(name, 32, thread);
227 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
229 #define THREAD_PANICF(msg, thread)
230 #define THREAD_ASSERT(exp, msg, thread)
231 #endif /* THREAD_EXTRA_CHECKS */
233 /* Thread locking */
234 #if NUM_CORES > 1
235 #define LOCK_THREAD(thread) \
236 ({ corelock_lock(&(thread)->slot_cl); })
237 #define TRY_LOCK_THREAD(thread) \
238 ({ corelock_try_lock(&(thread)->slot_cl); })
239 #define UNLOCK_THREAD(thread) \
240 ({ corelock_unlock(&(thread)->slot_cl); })
241 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
242 ({ unsigned int _core = (thread)->core; \
243 cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
244 cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
245 #else
246 #define LOCK_THREAD(thread) \
247 ({ })
248 #define TRY_LOCK_THREAD(thread) \
249 ({ })
250 #define UNLOCK_THREAD(thread) \
251 ({ })
252 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
253 ({ })
254 #endif
256 /* RTR list */
257 #define RTR_LOCK(core) \
258 ({ corelock_lock(&cores[core].rtr_cl); })
259 #define RTR_UNLOCK(core) \
260 ({ corelock_unlock(&cores[core].rtr_cl); })
262 #ifdef HAVE_PRIORITY_SCHEDULING
263 #define rtr_add_entry(core, priority) \
264 prio_add_entry(&cores[core].rtr, (priority))
266 #define rtr_subtract_entry(core, priority) \
267 prio_subtract_entry(&cores[core].rtr, (priority))
269 #define rtr_move_entry(core, from, to) \
270 prio_move_entry(&cores[core].rtr, (from), (to))
271 #else
272 #define rtr_add_entry(core, priority)
273 #define rtr_add_entry_inl(core, priority)
274 #define rtr_subtract_entry(core, priority)
275 #define rtr_subtract_entry_inl(core, priority)
276 #define rtr_move_entry(core, from, to)
277 #define rtr_move_entry_inl(core, from, to)
278 #endif
280 /*---------------------------------------------------------------------------
281 * Thread list structure - circular:
282 * +------------------------------+
283 * | |
284 * +--+---+<-+---+<-+---+<-+---+<-+
285 * Head->| T | | T | | T | | T |
286 * +->+---+->+---+->+---+->+---+--+
287 * | |
288 * +------------------------------+
289 *---------------------------------------------------------------------------
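 */

/* For example, after add_to_list_l() below has inserted threads A, B and C
 * (in that order) into an empty list: *list == A, A->l.next == B,
 * B->l.next == C, C->l.next == A and A->l.prev == C - the head's ->prev
 * always points at the most recently appended ("insert last") entry. */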
292 /*---------------------------------------------------------------------------
293 * Adds a thread to a list of threads using "insert last". Uses the "l"
294 * links.
295 *---------------------------------------------------------------------------
297 static void add_to_list_l(struct thread_entry **list,
298 struct thread_entry *thread)
300 struct thread_entry *l = *list;
302 if (l == NULL)
304 /* Insert into unoccupied list */
305 thread->l.prev = thread;
306 thread->l.next = thread;
307 *list = thread;
308 return;
311 /* Insert last */
312 thread->l.prev = l->l.prev;
313 thread->l.next = l;
314 l->l.prev->l.next = thread;
315 l->l.prev = thread;
318 /*---------------------------------------------------------------------------
319 * Removes a thread from a list of threads. Uses the "l" links.
320 *---------------------------------------------------------------------------
322 static void remove_from_list_l(struct thread_entry **list,
323 struct thread_entry *thread)
325 struct thread_entry *prev, *next;
327 next = thread->l.next;
329 if (thread == next)
331 /* The only item */
332 *list = NULL;
333 return;
336 if (thread == *list)
338 /* List becomes next item */
339 *list = next;
342 prev = thread->l.prev;
344 /* Fix links to jump over the removed entry. */
345 next->l.prev = prev;
346 prev->l.next = next;
349 /*---------------------------------------------------------------------------
350 * Timeout list structure - circular reverse (to make "remove item" O(1)),
351 * NULL-terminated forward (to ease the far more common forward traversal):
352 * +------------------------------+
353 * | |
354 * +--+---+<-+---+<-+---+<-+---+<-+
355 * Head->| T | | T | | T | | T |
356 * +---+->+---+->+---+->+---+-X
357 *---------------------------------------------------------------------------
360 /*---------------------------------------------------------------------------
361 * Add a thread to the core's timeout list by linking the pointers in its
362 * tmo structure.
363 *---------------------------------------------------------------------------
365 static void add_to_list_tmo(struct thread_entry *thread)
367 struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
368 THREAD_ASSERT(thread->tmo.prev == NULL,
369 "add_to_list_tmo->already listed", thread);
371 thread->tmo.next = NULL;
373 if (tmo == NULL)
375 /* Insert into unoccupied list */
376 thread->tmo.prev = thread;
377 cores[IF_COP_CORE(thread->core)].timeout = thread;
378 return;
381 /* Insert Last */
382 thread->tmo.prev = tmo->tmo.prev;
383 tmo->tmo.prev->tmo.next = thread;
384 tmo->tmo.prev = thread;
387 /*---------------------------------------------------------------------------
388 * Remove a thread from the core's timeout list by unlinking the pointers in
389 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
390 * is cancelled.
391 *---------------------------------------------------------------------------
393 static void remove_from_list_tmo(struct thread_entry *thread)
395 struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
396 struct thread_entry *prev = thread->tmo.prev;
397 struct thread_entry *next = thread->tmo.next;
399 THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
401 if (next != NULL)
402 next->tmo.prev = prev;
404 if (thread == *list)
406 /* List becomes next item and empty if next == NULL */
407 *list = next;
408 /* Mark as unlisted */
409 thread->tmo.prev = NULL;
411 else
413 if (next == NULL)
414 (*list)->tmo.prev = prev;
415 prev->tmo.next = next;
416 /* Mark as unlisted */
417 thread->tmo.prev = NULL;
422 #ifdef HAVE_PRIORITY_SCHEDULING
423 /*---------------------------------------------------------------------------
424 * Priority distribution structure (one category for each possible priority):
426 * +----+----+----+ ... +-----+
427 * hist: | F0 | F1 | F2 | | F31 |
428 * +----+----+----+ ... +-----+
429 * mask: | b0 | b1 | b2 | | b31 |
430 * +----+----+----+ ... +-----+
432 * F = count of threads at priority category n (frequency)
433 * b = bitmask of non-zero priority categories (occupancy)
435 * / if F[n] != 0 : 1
436 * b[n] = |
437 * \ else : 0
439 *---------------------------------------------------------------------------
440 * Basic priority inheritance protocol (PIP):
442 * Mn = mutex n, Tn = thread n
444 * A lower priority thread inherits the priority of the highest priority
445 * thread blocked waiting for it to complete an action (such as release a
446 * mutex or respond to a message via queue_send):
448 * 1) T2->M1->T1
450 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
451 * priority than T1 then T1 inherits the priority of T2.
453 * 2) T3
454 * \/
455 * T2->M1->T1
457 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
458 * T1 inherits the higher of T2 and T3.
460 * 3) T3->M2->T2->M1->T1
462 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
463 * then T1 inherits the priority of T3 through T2.
465 * Blocking chains can grow arbitrarily complex (though it's best that they
466 * not form at all very often :) and build up from these units.
467 *---------------------------------------------------------------------------
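 */

/* Worked example of the distribution above: with runnable threads at
 * priorities 5, 5 and 12, hist[5] == 2, hist[12] == 1 and mask has bits 5
 * and 12 set; find_first_set_bit(mask) == 5 then picks the numerically
 * lowest - i.e. highest - occupied priority. */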
470 /*---------------------------------------------------------------------------
471 * Increment frequency at category "priority"
472 *---------------------------------------------------------------------------
474 static inline unsigned int prio_add_entry(
475 struct priority_distribution *pd, int priority)
477 unsigned int count;
478 /* Enough size/instruction count difference for ARM makes it worth it to
479 * use different code (192 bytes for ARM). Only thing better is ASM. */
480 #ifdef CPU_ARM
481 count = pd->hist[priority];
482 if (++count == 1)
483 pd->mask |= 1 << priority;
484 pd->hist[priority] = count;
485 #else /* This one's better for Coldfire */
486 if ((count = ++pd->hist[priority]) == 1)
487 pd->mask |= 1 << priority;
488 #endif
490 return count;
493 /*---------------------------------------------------------------------------
494 * Decrement frequency at category "priority"
495 *---------------------------------------------------------------------------
497 static inline unsigned int prio_subtract_entry(
498 struct priority_distribution *pd, int priority)
500 unsigned int count;
502 #ifdef CPU_ARM
503 count = pd->hist[priority];
504 if (--count == 0)
505 pd->mask &= ~(1 << priority);
506 pd->hist[priority] = count;
507 #else
508 if ((count = --pd->hist[priority]) == 0)
509 pd->mask &= ~(1 << priority);
510 #endif
512 return count;
515 /*---------------------------------------------------------------------------
516 * Remove from one category and add to another
517 *---------------------------------------------------------------------------
519 static inline void prio_move_entry(
520 struct priority_distribution *pd, int from, int to)
522 uint32_t mask = pd->mask;
524 #ifdef CPU_ARM
525 unsigned int count;
527 count = pd->hist[from];
528 if (--count == 0)
529 mask &= ~(1 << from);
530 pd->hist[from] = count;
532 count = pd->hist[to];
533 if (++count == 1)
534 mask |= 1 << to;
535 pd->hist[to] = count;
536 #else
537 if (--pd->hist[from] == 0)
538 mask &= ~(1 << from);
540 if (++pd->hist[to] == 1)
541 mask |= 1 << to;
542 #endif
544 pd->mask = mask;
547 /*---------------------------------------------------------------------------
548 * Change the priority and rtr entry for a running thread
549 *---------------------------------------------------------------------------
551 static inline void set_running_thread_priority(
552 struct thread_entry *thread, int priority)
554 const unsigned int core = IF_COP_CORE(thread->core);
555 RTR_LOCK(core);
556 rtr_move_entry(core, thread->priority, priority);
557 thread->priority = priority;
558 RTR_UNLOCK(core);
561 /*---------------------------------------------------------------------------
562 * Finds the highest priority thread in a list of threads. If the list is
563 * empty, PRIORITY_IDLE is returned.
565 * It is possible to use the struct priority_distribution within an object
566 * instead of scanning the remaining threads in the list but as a compromise,
567 * the resulting per-object memory overhead is saved at a slight speed
568 * penalty under high contention.
569 *---------------------------------------------------------------------------
571 static int find_highest_priority_in_list_l(
572 struct thread_entry * const thread)
574 if (LIKELY(thread != NULL))
576 /* Go through the list until ending up at the initial thread */
577 int highest_priority = thread->priority;
578 struct thread_entry *curr = thread;
582 int priority = curr->priority;
584 if (priority < highest_priority)
585 highest_priority = priority;
587 curr = curr->l.next;
589 while (curr != thread);
591 return highest_priority;
594 return PRIORITY_IDLE;
597 /*---------------------------------------------------------------------------
598 * Register priority with blocking system and bubble it down the chain if
599 * any until we reach the end or something is already equal or higher.
601 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
602 * targets but that same action also guarantees a circular block anyway and
603 * those are prevented, right? :-)
604 *---------------------------------------------------------------------------
606 static struct thread_entry *
607 blocker_inherit_priority(struct thread_entry *current)
609 const int priority = current->priority;
610 struct blocker *bl = current->blocker;
611 struct thread_entry * const tstart = current;
612 struct thread_entry *bl_t = bl->thread;
614 /* Blocker cannot change since the object protection is held */
615 LOCK_THREAD(bl_t);
617 for (;;)
619 struct thread_entry *next;
620 int bl_pr = bl->priority;
622 if (priority >= bl_pr)
623 break; /* Object priority already high enough */
625 bl->priority = priority;
627 /* Add this one */
628 prio_add_entry(&bl_t->pdist, priority);
630 if (bl_pr < PRIORITY_IDLE)
632 /* Not first waiter - subtract old one */
633 prio_subtract_entry(&bl_t->pdist, bl_pr);
636 if (priority >= bl_t->priority)
637 break; /* Thread priority high enough */
639 if (bl_t->state == STATE_RUNNING)
641 /* Blocking thread is a running thread therefore there are no
642 * further blockers. Change the "run queue" on which it
643 * resides. */
644 set_running_thread_priority(bl_t, priority);
645 break;
648 bl_t->priority = priority;
650 /* If blocking thread has a blocker, apply transitive inheritance */
651 bl = bl_t->blocker;
653 if (bl == NULL)
654 break; /* End of chain or object doesn't support inheritance */
656 next = bl->thread;
658 if (UNLIKELY(next == tstart))
659 break; /* Full-circle - deadlock! */
661 UNLOCK_THREAD(current);
663 #if NUM_CORES > 1
664 for (;;)
666 LOCK_THREAD(next);
668 /* Blocker could change - retest condition */
669 if (LIKELY(bl->thread == next))
670 break;
672 UNLOCK_THREAD(next);
673 next = bl->thread;
675 #endif
676 current = bl_t;
677 bl_t = next;
680 UNLOCK_THREAD(bl_t);
682 return current;
685 /*---------------------------------------------------------------------------
686 * Readjust priorities when waking a thread blocked waiting for another
687 * in essence "releasing" the thread's effect on the object owner. Can be
688 * performed from any context.
689 *---------------------------------------------------------------------------
691 struct thread_entry *
692 wakeup_priority_protocol_release(struct thread_entry *thread)
694 const int priority = thread->priority;
695 struct blocker *bl = thread->blocker;
696 struct thread_entry * const tstart = thread;
697 struct thread_entry *bl_t = bl->thread;
699 /* Blocker cannot change since object will be locked */
700 LOCK_THREAD(bl_t);
702 thread->blocker = NULL; /* Thread not blocked */
704 for (;;)
706 struct thread_entry *next;
707 int bl_pr = bl->priority;
709 if (priority > bl_pr)
710 break; /* Object priority higher */
712 next = *thread->bqp;
714 if (next == NULL)
716 /* No more threads in queue */
717 prio_subtract_entry(&bl_t->pdist, bl_pr);
718 bl->priority = PRIORITY_IDLE;
720 else
722 /* Check list for highest remaining priority */
723 int queue_pr = find_highest_priority_in_list_l(next);
725 if (queue_pr == bl_pr)
726 break; /* Object priority not changing */
728 /* Change queue priority */
729 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
730 bl->priority = queue_pr;
733 if (bl_pr > bl_t->priority)
734 break; /* thread priority is higher */
736 bl_pr = find_first_set_bit(bl_t->pdist.mask);
738 if (bl_pr == bl_t->priority)
739 break; /* Thread priority not changing */
741 if (bl_t->state == STATE_RUNNING)
743 /* No further blockers */
744 set_running_thread_priority(bl_t, bl_pr);
745 break;
748 bl_t->priority = bl_pr;
750 /* If blocking thread has a blocker, apply transitive inheritance */
751 bl = bl_t->blocker;
753 if (bl == NULL)
754 break; /* End of chain or object doesn't support inheritance */
756 next = bl->thread;
758 if (UNLIKELY(next == tstart))
759 break; /* Full-circle - deadlock! */
761 UNLOCK_THREAD(thread);
763 #if NUM_CORES > 1
764 for (;;)
766 LOCK_THREAD(next);
768 /* Blocker could change - retest condition */
769 if (LIKELY(bl->thread == next))
770 break;
772 UNLOCK_THREAD(next);
773 next = bl->thread;
775 #endif
776 thread = bl_t;
777 bl_t = next;
780 UNLOCK_THREAD(bl_t);
782 #if NUM_CORES > 1
783 if (UNLIKELY(thread != tstart))
785 /* Relock original if it changed */
786 LOCK_THREAD(tstart);
788 #endif
790 return cores[CURRENT_CORE].running;
793 /*---------------------------------------------------------------------------
794 * Transfer ownership to a thread waiting for an object and transfer
795 * inherited priority boost from other waiters. This algorithm knows that
796 * blocking chains may only unblock from the very end.
798 * Only the owning thread itself may call this and so the assumption that
799 * it is the running thread is made.
800 *---------------------------------------------------------------------------
802 struct thread_entry *
803 wakeup_priority_protocol_transfer(struct thread_entry *thread)
805 /* Waking thread inherits priority boost from object owner */
806 struct blocker *bl = thread->blocker;
807 struct thread_entry *bl_t = bl->thread;
808 struct thread_entry *next;
809 int bl_pr;
811 THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
812 "UPPT->wrong thread", cores[CURRENT_CORE].running);
814 LOCK_THREAD(bl_t);
816 bl_pr = bl->priority;
818 /* Remove the object's boost from the owning thread */
819 if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
820 bl_pr <= bl_t->priority)
822 /* No more threads at this priority are waiting and the old level is
823 * at least the thread level */
824 int priority = find_first_set_bit(bl_t->pdist.mask);
826 if (priority != bl_t->priority)
828 /* Adjust this thread's priority */
829 set_running_thread_priority(bl_t, priority);
833 next = *thread->bqp;
835 if (LIKELY(next == NULL))
837 /* Expected shortcut - no more waiters */
838 bl_pr = PRIORITY_IDLE;
840 else
842 if (thread->priority <= bl_pr)
844 /* Need to scan threads remaining in queue */
845 bl_pr = find_highest_priority_in_list_l(next);
848 if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
849 bl_pr < thread->priority)
851 /* Thread priority must be raised */
852 thread->priority = bl_pr;
856 bl->thread = thread; /* This thread pwns */
857 bl->priority = bl_pr; /* Save highest blocked priority */
858 thread->blocker = NULL; /* Thread not blocked */
860 UNLOCK_THREAD(bl_t);
862 return bl_t;
865 /*---------------------------------------------------------------------------
866 * No threads must be blocked waiting for this thread except for it to exit.
867 * The alternative is more elaborate cleanup and object registration code.
868 * Check this for risk of silent data corruption when objects with
869 * inheritable blocking are abandoned by the owner - not precise but may
870 * catch something.
871 *---------------------------------------------------------------------------
873 static void __attribute__((noinline)) check_for_obj_waiters(
874 const char *function, struct thread_entry *thread)
876 /* Only one bit in the mask should be set with a frequency of 1 which
877 * represents the thread's own base priority */
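    /* (mask & (mask - 1)) clears the lowest set bit, so the test below is
     * non-zero exactly when more than one priority level is occupied, e.g.
     * mask == 0x12 -> 0x10 (extra waiters present) but mask == 0x10 -> 0
     * (only the base priority remains). */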
878 uint32_t mask = thread->pdist.mask;
879 if ((mask & (mask - 1)) != 0 ||
880 thread->pdist.hist[find_first_set_bit(mask)] > 1)
882 char name[32];
883 thread_get_name(name, 32, thread);
884 panicf("%s->%s with obj. waiters", function, name);
887 #endif /* HAVE_PRIORITY_SCHEDULING */
889 /*---------------------------------------------------------------------------
890 * Move a thread back to a running state on its core.
891 *---------------------------------------------------------------------------
893 static void core_schedule_wakeup(struct thread_entry *thread)
895 const unsigned int core = IF_COP_CORE(thread->core);
897 RTR_LOCK(core);
899 thread->state = STATE_RUNNING;
901 add_to_list_l(&cores[core].running, thread);
902 rtr_add_entry(core, thread->priority);
904 RTR_UNLOCK(core);
906 #if NUM_CORES > 1
907 if (core != CURRENT_CORE)
908 core_wake(core);
909 #endif
912 /*---------------------------------------------------------------------------
913 * Check the core's timeout list when at least one thread is due to wake.
914 * Filtering for the condition is done before making the call. Resets the
915 * tick when the next check will occur.
916 *---------------------------------------------------------------------------
918 void check_tmo_threads(void)
920 const unsigned int core = CURRENT_CORE;
921 const long tick = current_tick; /* snapshot the current tick */
922 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
923 struct thread_entry *next = cores[core].timeout;
925 /* If there are no processes waiting for a timeout, just keep the check
926 tick from falling into the past. */
928 /* Break the loop once we have walked through the list of all
929 * sleeping processes or have removed them all. */
930 while (next != NULL)
932 /* Check sleeping threads. Allow interrupts between checks. */
933 enable_irq();
935 struct thread_entry *curr = next;
937 next = curr->tmo.next;
939 /* Lock thread slot against explicit wakeup */
940 disable_irq();
941 LOCK_THREAD(curr);
943 unsigned state = curr->state;
945 if (state < TIMEOUT_STATE_FIRST)
947 /* Cleanup threads no longer on a timeout but still on the
948 * list. */
949 remove_from_list_tmo(curr);
951 else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
953 /* Timeout still pending - this will be the usual case */
954 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
956 /* Earliest timeout found so far - move the next check up
957 to its time */
958 next_tmo_check = curr->tmo_tick;
961 else
963 /* Sleep timeout has been reached so bring the thread back to
964 * life again. */
965 if (state == STATE_BLOCKED_W_TMO)
967 #if NUM_CORES > 1
968 /* Lock the waiting thread's kernel object */
969 struct corelock *ocl = curr->obj_cl;
971 if (UNLIKELY(corelock_try_lock(ocl) == 0))
973 /* Need to retry in the correct order though the need is
974 * unlikely */
975 UNLOCK_THREAD(curr);
976 corelock_lock(ocl);
977 LOCK_THREAD(curr);
979 if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
981 /* Thread was woken or removed explicitly while slot
982 * was unlocked */
983 corelock_unlock(ocl);
984 remove_from_list_tmo(curr);
985 UNLOCK_THREAD(curr);
986 continue;
989 #endif /* NUM_CORES */
991 remove_from_list_l(curr->bqp, curr);
993 #ifdef HAVE_WAKEUP_EXT_CB
994 if (curr->wakeup_ext_cb != NULL)
995 curr->wakeup_ext_cb(curr);
996 #endif
998 #ifdef HAVE_PRIORITY_SCHEDULING
999 if (curr->blocker != NULL)
1000 wakeup_priority_protocol_release(curr);
1001 #endif
1002 corelock_unlock(ocl);
1004 /* else state == STATE_SLEEPING */
1006 remove_from_list_tmo(curr);
1008 RTR_LOCK(core);
1010 curr->state = STATE_RUNNING;
1012 add_to_list_l(&cores[core].running, curr);
1013 rtr_add_entry(core, curr->priority);
1015 RTR_UNLOCK(core);
1018 UNLOCK_THREAD(curr);
1021 cores[core].next_tmo_check = next_tmo_check;
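/* Example of the flow: sleep_thread(HZ/2) below sets
 * tmo_tick = current_tick + HZ/2 + 1 and links the caller on the timeout
 * list; once current_tick reaches cores[core].next_tmo_check,
 * switch_thread() calls this function and the thread goes back on the run
 * list. */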
1024 /*---------------------------------------------------------------------------
1025 * Performs operations that must be done before blocking a thread but after
1026 * the state is saved.
1027 *---------------------------------------------------------------------------
1029 #if NUM_CORES > 1
1030 static inline void run_blocking_ops(
1031 unsigned int core, struct thread_entry *thread)
1033 struct thread_blk_ops *ops = &cores[core].blk_ops;
1034 const unsigned flags = ops->flags;
1036 if (LIKELY(flags == TBOP_CLEAR))
1037 return;
1039 switch (flags)
1041 case TBOP_SWITCH_CORE:
1042 core_switch_blk_op(core, thread);
1043 /* Fall-through */
1044 case TBOP_UNLOCK_CORELOCK:
1045 corelock_unlock(ops->cl_p);
1046 break;
1049 ops->flags = TBOP_CLEAR;
1051 #endif /* NUM_CORES > 1 */
1053 #ifdef RB_PROFILE
1054 void profile_thread(void)
1056 profstart(cores[CURRENT_CORE].running - threads);
1058 #endif
1060 /*---------------------------------------------------------------------------
1061 * Prepares a thread to block on an object's list and/or for a specified
1062 * duration - expects object and slot to be appropriately locked if needed
1063 * and interrupts to be masked.
1064 *---------------------------------------------------------------------------
1066 static inline void block_thread_on_l(struct thread_entry *thread,
1067 unsigned state)
1069 /* If inlined, unreachable branches will be pruned with no size penalty
1070 because state is passed as a constant parameter. */
1071 const unsigned int core = IF_COP_CORE(thread->core);
1073 /* Remove the thread from the list of running threads. */
1074 RTR_LOCK(core);
1075 remove_from_list_l(&cores[core].running, thread);
1076 rtr_subtract_entry(core, thread->priority);
1077 RTR_UNLOCK(core);
1079 /* Add a timeout to the block if not infinite */
1080 switch (state)
1082 case STATE_BLOCKED:
1083 case STATE_BLOCKED_W_TMO:
1084 /* Put the thread into a new list of inactive threads. */
1085 add_to_list_l(thread->bqp, thread);
1087 if (state == STATE_BLOCKED)
1088 break;
1090 /* Fall-through */
1091 case STATE_SLEEPING:
1092 /* If this thread times out sooner than any other thread, update
1093 next_tmo_check to its timeout */
1094 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1096 cores[core].next_tmo_check = thread->tmo_tick;
1099 if (thread->tmo.prev == NULL)
1101 add_to_list_tmo(thread);
1103 /* else thread was never removed from list - just keep it there */
1104 break;
1107 /* Remember the next thread about to block. */
1108 cores[core].block_task = thread;
1110 /* Report new state. */
1111 thread->state = state;
1114 /*---------------------------------------------------------------------------
1115 * Switch thread in round robin fashion for any given priority. Any thread
1116 * that removed itself from the running list first must specify itself in
1117 * the parameter.
1119 * INTERNAL: Intended for use by kernel and not for programs.
1120 *---------------------------------------------------------------------------
1122 void switch_thread(void)
1125 const unsigned int core = CURRENT_CORE;
1126 struct thread_entry *block = cores[core].block_task;
1127 struct thread_entry *thread = cores[core].running;
1129 /* Get context to save - next thread to run is unknown until all wakeups
1130 * are evaluated */
1131 if (block != NULL)
1133 cores[core].block_task = NULL;
1135 #if NUM_CORES > 1
1136 if (UNLIKELY(thread == block))
1138 /* This was the last thread running and another core woke us before
1139 * reaching here. Force next thread selection to give tmo threads or
1140 * other threads woken before this block a first chance. */
1141 block = NULL;
1143 else
1144 #endif
1146 /* Blocking task is the old one */
1147 thread = block;
1151 #ifdef RB_PROFILE
1152 profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
1153 #endif
1155 /* Begin task switching by saving our current context so that we can
1156 * restore the state of the current thread later to the point prior
1157 * to this call. */
1158 store_context(&thread->context);
1160 /* Check if the current thread stack is overflown */
1161 if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
1162 thread_stkov(thread);
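    /* (create_thread() pre-fills the whole stack with DEADBEEF, so a
     * different value in the first word means the stack has been fully
     * descended at some point.) */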
1164 #ifdef BUFFER_ALLOC_DEBUG
1165 /* Check if the current thread just did bad things with buffer_alloc()ed
1166 * memory */
1168 static char name[32];
1169 thread_get_name(name, 32, thread);
1170 buffer_alloc_check(name);
1172 #endif
1174 #if NUM_CORES > 1
1175 /* Run any blocking operations requested before switching/sleeping */
1176 run_blocking_ops(core, thread);
1177 #endif
1179 #ifdef HAVE_PRIORITY_SCHEDULING
1180 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1181 /* Reset the value of thread's skip count */
1182 thread->skip_count = 0;
1183 #endif
1185 for (;;)
1187 /* If there are threads on a timeout and the earliest wakeup is due,
1188 * check the list and wake any threads that need to start running
1189 * again. */
1190 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
1192 check_tmo_threads();
1195 disable_irq();
1196 RTR_LOCK(core);
1198 thread = cores[core].running;
1200 if (UNLIKELY(thread == NULL))
1202 /* Enter sleep mode to reduce power usage - woken up on interrupt
1203 * or wakeup request from another core - expected to enable
1204 * interrupts. */
1205 RTR_UNLOCK(core);
1206 core_sleep(IF_COP(core));
1208 else
1210 #ifdef HAVE_PRIORITY_SCHEDULING
1211 /* Select the new task based on priorities and the last time a
1212 * process got CPU time relative to the highest priority runnable
1213 * task. */
1214 struct priority_distribution *pd = &cores[core].rtr;
1215 int max = find_first_set_bit(pd->mask);
1217 if (block == NULL)
1219 /* Not switching on a block, tentatively select next thread */
1220 thread = thread->l.next;
1223 for (;;)
1225 int priority = thread->priority;
1226 int diff;
1228 /* This ridiculously simple method of aging seems to work
1229 * suspiciously well. It does tend to reward CPU hogs (under
1230 * yielding) but that's generally not desirable at all. On
1231 * the plus side, it, relative to other threads, penalizes
1232 * excess yielding which is good if some high priority thread
1233 * is performing no useful work such as polling for a device
1234 * to be ready. Of course, aging is only employed when higher
1235 * and lower priority threads are runnable. The highest
1236 * priority runnable thread(s) are never skipped unless a
1237 * lower-priority process has aged sufficiently. Priorities
1238 * of REALTIME class are run strictly according to priority
1239 * thus are not subject to switchout due to lower-priority
1240 * processes aging; they must give up the processor by going
1241 * off the run list. */
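            /* For example, with the test below: if the highest runnable
             * priority is max == 2 and this thread's priority is 5, then
             * diff == 3 and the thread is chosen only once it has been
             * passed over more than diff*diff == 9 times. */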
1242 if (LIKELY(priority <= max) ||
1243 IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
1244 (priority > PRIORITY_REALTIME &&
1245 (diff = priority - max,
1246 ++thread->skip_count > diff*diff)))
1248 cores[core].running = thread;
1249 break;
1252 thread = thread->l.next;
1254 #else
1255 /* Without priority use a simple FCFS algorithm */
1256 if (block == NULL)
1258 /* Not switching on a block, select next thread */
1259 thread = thread->l.next;
1260 cores[core].running = thread;
1262 #endif /* HAVE_PRIORITY_SCHEDULING */
1264 RTR_UNLOCK(core);
1265 enable_irq();
1266 break;
1270 /* And finally give control to the next thread. */
1271 load_context(&thread->context);
1273 #ifdef RB_PROFILE
1274 profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
1275 #endif
1279 /*---------------------------------------------------------------------------
1280 * Sleeps a thread for at least a specified number of ticks with zero being
1281 * a wait until the next tick.
1283 * INTERNAL: Intended for use by kernel and not for programs.
1284 *---------------------------------------------------------------------------
1286 void sleep_thread(int ticks)
1288 struct thread_entry *current = cores[CURRENT_CORE].running;
1290 LOCK_THREAD(current);
1292 /* Set our timeout, remove from run list and join timeout list. */
1293 current->tmo_tick = current_tick + ticks + 1;
1294 block_thread_on_l(current, STATE_SLEEPING);
1296 UNLOCK_THREAD(current);
1299 /*---------------------------------------------------------------------------
1300 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1302 * INTERNAL: Intended for use by kernel objects and not for programs.
1303 *---------------------------------------------------------------------------
1305 void block_thread(struct thread_entry *current)
1307 /* Set the state to blocked and take us off of the run queue until we
1308 * are explicitly woken */
1309 LOCK_THREAD(current);
1311 /* Set the list for explicit wakeup */
1312 block_thread_on_l(current, STATE_BLOCKED);
1314 #ifdef HAVE_PRIORITY_SCHEDULING
1315 if (current->blocker != NULL)
1317 /* Object supports PIP */
1318 current = blocker_inherit_priority(current);
1320 #endif
1322 UNLOCK_THREAD(current);
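/* Minimal sketch of how a kernel object is expected to use block_thread()
 * (mirrors thread_wait() further down; "obj" and its fields are
 * hypothetical):
 *
 *     corelock_lock(&obj->cl);                 <- object protection
 *     IF_COP( current->obj_cl = &obj->cl; )
 *     current->bqp = &obj->queue;              <- queue to block on
 *     disable_irq();
 *     block_thread(current);
 *     corelock_unlock(&obj->cl);
 *     switch_thread();                         <- actually give up the CPU
 */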
1325 /*---------------------------------------------------------------------------
1326 * Block a thread on a blocking queue for a specified time interval or until
1327 * explicitly woken - whichever happens first.
1329 * INTERNAL: Intended for use by kernel objects and not for programs.
1330 *---------------------------------------------------------------------------
1332 void block_thread_w_tmo(struct thread_entry *current, int timeout)
1334 /* Get the entry for the current running thread. */
1335 LOCK_THREAD(current);
1337 /* Set the state to blocked with the specified timeout */
1338 current->tmo_tick = current_tick + timeout;
1340 /* Set the list for explicit wakeup */
1341 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1343 #ifdef HAVE_PRIORITY_SCHEDULING
1344 if (current->blocker != NULL)
1346 /* Object supports PIP */
1347 current = blocker_inherit_priority(current);
1349 #endif
1351 UNLOCK_THREAD(current);
1354 /*---------------------------------------------------------------------------
1355 * Explicitly wake up a thread on a blocking queue. Only affects threads of
1356 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
1358 * This code should be considered a critical section by the caller meaning
1359 * that the object's corelock should be held.
1361 * INTERNAL: Intended for use by kernel objects and not for programs.
1362 *---------------------------------------------------------------------------
1364 unsigned int wakeup_thread(struct thread_entry **list)
1366 struct thread_entry *thread = *list;
1367 unsigned int result = THREAD_NONE;
1369 /* Check if there is a blocked thread at all. */
1370 if (thread == NULL)
1371 return result;
1373 LOCK_THREAD(thread);
1375 /* Determine thread's current state. */
1376 switch (thread->state)
1378 case STATE_BLOCKED:
1379 case STATE_BLOCKED_W_TMO:
1380 remove_from_list_l(list, thread);
1382 result = THREAD_OK;
1384 #ifdef HAVE_PRIORITY_SCHEDULING
1385 struct thread_entry *current;
1386 struct blocker *bl = thread->blocker;
1388 if (bl == NULL)
1390 /* No inheritance - just boost the thread by aging */
1391 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1392 thread->skip_count = thread->priority;
1393 current = cores[CURRENT_CORE].running;
1395 else
1397 /* Call the specified unblocking PIP */
1398 current = bl->wakeup_protocol(thread);
1401 if (current != NULL &&
1402 find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
1403 < current->priority)
1405 /* There is a thread ready to run of higher or same priority on
1406 * the same core as the current one; recommend a task switch.
1407 * Knowing if this is an interrupt call would be helpful here. */
1408 result |= THREAD_SWITCH;
1410 #endif /* HAVE_PRIORITY_SCHEDULING */
1412 core_schedule_wakeup(thread);
1413 break;
1415 /* Nothing to do. State is not blocked. */
1416 #if THREAD_EXTRA_CHECKS
1417 default:
1418 THREAD_PANICF("wakeup_thread->block invalid", thread);
1419 case STATE_RUNNING:
1420 case STATE_KILLED:
1421 break;
1422 #endif
1425 UNLOCK_THREAD(thread);
1426 return result;
1429 /*---------------------------------------------------------------------------
1430 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
1431 * from each operation or THREAD_NONE if nothing was awakened. Object owning
1432 * the queue must be locked first.
1434 * INTERNAL: Intended for use by kernel objects and not for programs.
1435 *---------------------------------------------------------------------------
1437 unsigned int thread_queue_wake(struct thread_entry **list)
1439 unsigned result = THREAD_NONE;
1441 for (;;)
1443 unsigned int rc = wakeup_thread(list);
1445 if (rc == THREAD_NONE)
1446 break; /* No more threads */
1448 result |= rc;
1451 return result;
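/* Sketch of a typical caller ("obj" is hypothetical; the THREAD_SWITCH bit
 * is the one set by wakeup_thread() above):
 *
 *     unsigned int result = thread_queue_wake(&obj->queue);
 *     if (result & THREAD_SWITCH)
 *         switch_thread();    <- let a higher-priority woken thread run now
 */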
1454 /*---------------------------------------------------------------------------
1455 * Assign the thread slot a new ID. Version is 1-255.
1456 *---------------------------------------------------------------------------
1458 static void new_thread_id(unsigned int slot_num,
1459 struct thread_entry *thread)
1461 unsigned int version =
1462 (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
1463 & THREAD_ID_VERSION_MASK;
1465 /* If wrapped to 0, make it 1 */
1466 if (version == 0)
1467 version = 1u << THREAD_ID_VERSION_SHIFT;
1469 thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
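/* Example of the encoding (shift and mask values assumed from thread.h):
 * with THREAD_ID_VERSION_SHIFT == 8 and THREAD_ID_SLOT_MASK == 0xff, slot 3
 * at version 2 gives id == 0x0203; thread_id_entry() below recovers the
 * slot with (id & THREAD_ID_SLOT_MASK). */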
1472 /*---------------------------------------------------------------------------
1473 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
1474 * will be locked on multicore.
1475 *---------------------------------------------------------------------------
1477 static struct thread_entry * find_empty_thread_slot(void)
1479 /* Any slot could be on an interrupt-accessible list */
1480 IF_COP( int oldlevel = disable_irq_save(); )
1481 struct thread_entry *thread = NULL;
1482 int n;
1484 for (n = 0; n < MAXTHREADS; n++)
1486 /* Obtain current slot state - lock it on multicore */
1487 struct thread_entry *t = &threads[n];
1488 LOCK_THREAD(t);
1490 if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
1492 /* Slot is empty - leave it locked and caller will unlock */
1493 thread = t;
1494 break;
1497 /* Finished examining slot - no longer busy - unlock on multicore */
1498 UNLOCK_THREAD(t);
1501 IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
1502 not accessible to them yet */
1503 return thread;
1506 /*---------------------------------------------------------------------------
1507 * Return the thread_entry pointer for a thread_id. Return the current
1508 * thread if the ID is 0 (alias for current).
1509 *---------------------------------------------------------------------------
1511 struct thread_entry * thread_id_entry(unsigned int thread_id)
1513 return (thread_id == THREAD_ID_CURRENT) ?
1514 cores[CURRENT_CORE].running :
1515 &threads[thread_id & THREAD_ID_SLOT_MASK];
1518 /*---------------------------------------------------------------------------
1519 * Place the current core in idle mode - woken up on interrupt or wake
1520 * request from another core.
1521 *---------------------------------------------------------------------------
1523 void core_idle(void)
1525 IF_COP( const unsigned int core = CURRENT_CORE; )
1526 disable_irq();
1527 core_sleep(IF_COP(core));
1530 /*---------------------------------------------------------------------------
1531 * Create a thread. If using a dual core architecture, specify which core to
1532 * start the thread on.
1534 * Return ID if context area could be allocated, else 0.
1535 *---------------------------------------------------------------------------
1537 unsigned int create_thread(void (*function)(void),
1538 void* stack, size_t stack_size,
1539 unsigned flags, const char *name
1540 IF_PRIO(, int priority)
1541 IF_COP(, unsigned int core))
1543 unsigned int i;
1544 unsigned int stack_words;
1545 uintptr_t stackptr, stackend;
1546 struct thread_entry *thread;
1547 unsigned state;
1548 int oldlevel;
1550 thread = find_empty_thread_slot();
1551 if (thread == NULL)
1553 return 0;
1556 oldlevel = disable_irq_save();
1558 /* Munge the stack to make it easy to spot stack overflows */
1559 stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
1560 stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
1561 stack_size = stackend - stackptr;
1562 stack_words = stack_size / sizeof (uintptr_t);
1564 for (i = 0; i < stack_words; i++)
1566 ((uintptr_t *)stackptr)[i] = DEADBEEF;
1569 /* Store interesting information */
1570 thread->name = name;
1571 thread->stack = (uintptr_t *)stackptr;
1572 thread->stack_size = stack_size;
1573 thread->queue = NULL;
1574 #ifdef HAVE_WAKEUP_EXT_CB
1575 thread->wakeup_ext_cb = NULL;
1576 #endif
1577 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1578 thread->cpu_boost = 0;
1579 #endif
1580 #ifdef HAVE_PRIORITY_SCHEDULING
1581 memset(&thread->pdist, 0, sizeof(thread->pdist));
1582 thread->blocker = NULL;
1583 thread->base_priority = priority;
1584 thread->priority = priority;
1585 thread->skip_count = priority;
1586 prio_add_entry(&thread->pdist, priority);
1587 #endif
1589 #ifdef HAVE_IO_PRIORITY
1590 /* Default to high (foreground) priority */
1591 thread->io_priority = IO_PRIORITY_IMMEDIATE;
1592 #endif
1594 #if NUM_CORES > 1
1595 thread->core = core;
1597 /* Writeback stack munging or anything else before starting */
1598 if (core != CURRENT_CORE)
1600 cpucache_flush();
1602 #endif
1604 /* Thread is not on any timeout list but be a bit paranoid */
1605 thread->tmo.prev = NULL;
1607 state = (flags & CREATE_THREAD_FROZEN) ?
1608 STATE_FROZEN : STATE_RUNNING;
1610 thread->context.sp = (typeof (thread->context.sp))stackend;
1612 /* Load the thread's context structure with needed startup information */
1613 THREAD_STARTUP_INIT(core, thread, function);
1615 thread->state = state;
1616 i = thread->id; /* Snapshot while locked */
1618 if (state == STATE_RUNNING)
1619 core_schedule_wakeup(thread);
1621 UNLOCK_THREAD(thread);
1622 restore_irq(oldlevel);
1624 return i;
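/* Minimal usage sketch (stack size, priority constant and core designator
 * are assumptions; the IF_PRIO/IF_COP arguments disappear on builds without
 * those features):
 *
 *     static long example_stack[1024 / sizeof(long)];
 *     static void example_func(void);
 *
 *     unsigned int id = create_thread(example_func,
 *                                     example_stack, sizeof(example_stack),
 *                                     0, "example"
 *                                     IF_PRIO(, PRIORITY_BACKGROUND)
 *                                     IF_COP(, CPU));
 *     if (id != 0)
 *         thread_wait(id);    <- block until the new thread exits
 */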
1627 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1628 /*---------------------------------------------------------------------------
1629 * Change the boost state of a thread boosting or unboosting the CPU
1630 * as required.
1631 *---------------------------------------------------------------------------
1633 static inline void boost_thread(struct thread_entry *thread, bool boost)
1635 if ((thread->cpu_boost != 0) != boost)
1637 thread->cpu_boost = boost;
1638 cpu_boost(boost);
1642 void trigger_cpu_boost(void)
1644 struct thread_entry *current = cores[CURRENT_CORE].running;
1645 boost_thread(current, true);
1648 void cancel_cpu_boost(void)
1650 struct thread_entry *current = cores[CURRENT_CORE].running;
1651 boost_thread(current, false);
1653 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
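/* Typical usage of the pair above: boost before a burst of work, unboost
 * when going back to waiting:
 *
 *     trigger_cpu_boost();
 *     ... CPU-intensive section ...
 *     cancel_cpu_boost();
 */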
1655 /*---------------------------------------------------------------------------
1656 * Block the current thread until another thread terminates. A thread may
1657 * wait on itself to terminate which prevents it from running again and it
1658 * will need to be killed externally.
1659 * Parameter is the ID as returned from create_thread().
1660 *---------------------------------------------------------------------------
1662 void thread_wait(unsigned int thread_id)
1664 struct thread_entry *current = cores[CURRENT_CORE].running;
1665 struct thread_entry *thread = thread_id_entry(thread_id);
1667 /* Lock thread-as-waitable-object lock */
1668 corelock_lock(&thread->waiter_cl);
1670 /* Be sure it hasn't been killed yet */
1671 if (thread_id == THREAD_ID_CURRENT ||
1672 (thread->id == thread_id && thread->state != STATE_KILLED))
1674 IF_COP( current->obj_cl = &thread->waiter_cl; )
1675 current->bqp = &thread->queue;
1677 disable_irq();
1678 block_thread(current);
1680 corelock_unlock(&thread->waiter_cl);
1682 switch_thread();
1683 return;
1686 corelock_unlock(&thread->waiter_cl);
1689 /*---------------------------------------------------------------------------
1690 * Exit the current thread. The Right Way to Do Things (TM).
1691 *---------------------------------------------------------------------------
1693 /* This is done to foil optimizations that may require the current stack,
1694 * such as optimizing subexpressions that put variables on the stack that
1695 * get used after switching stacks. */
1696 #if NUM_CORES > 1
1697 /* Called by ASM stub */
1698 static void thread_final_exit_do(struct thread_entry *current)
1699 #else
1700 /* No special procedure is required before calling */
1701 static inline void thread_final_exit(struct thread_entry *current)
1702 #endif
1704 /* At this point, this thread isn't using resources allocated for
1705 * execution except the slot itself. */
1707 /* Signal this thread */
1708 thread_queue_wake(&current->queue);
1709 corelock_unlock(&current->waiter_cl);
1710 switch_thread();
1711 /* This should never and must never be reached - if it is, the
1712 * state is corrupted */
1713 THREAD_PANICF("thread_exit->K:*R", current);
1714 while (1);
1717 void thread_exit(void)
1719 register struct thread_entry * current = cores[CURRENT_CORE].running;
1721 /* Cancel CPU boost if any */
1722 cancel_cpu_boost();
1724 disable_irq();
1726 corelock_lock(&current->waiter_cl);
1727 LOCK_THREAD(current);
1729 #if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
1730 if (current->name == THREAD_DESTRUCT)
1732 /* Thread being killed - become a waiter */
1733 unsigned int id = current->id;
1734 UNLOCK_THREAD(current);
1735 corelock_unlock(&current->waiter_cl);
1736 thread_wait(id);
1737 THREAD_PANICF("thread_exit->WK:*R", current);
1739 #endif
1741 #ifdef HAVE_PRIORITY_SCHEDULING
1742 check_for_obj_waiters("thread_exit", current);
1743 #endif
1745 if (current->tmo.prev != NULL)
1747 /* Cancel pending timeout list removal */
1748 remove_from_list_tmo(current);
1751 /* Switch tasks and never return */
1752 block_thread_on_l(current, STATE_KILLED);
1754 /* Slot must be unusable until thread is really gone */
1755 UNLOCK_THREAD_AT_TASK_SWITCH(current);
1757 /* Update ID for this slot */
1758 new_thread_id(current->id, current);
1759 current->name = NULL;
1761 /* Do final cleanup and remove the thread */
1762 thread_final_exit(current);
1765 #ifdef ALLOW_REMOVE_THREAD
1766 /*---------------------------------------------------------------------------
1767 * Remove a thread from the scheduler. Not The Right Way to Do Things in
1768 * normal programs.
1770 * Parameter is the ID as returned from create_thread().
1772 * Use with care on threads that are not under careful control as this may
1773 * leave various objects in an undefined state.
1774 *---------------------------------------------------------------------------
1776 void remove_thread(unsigned int thread_id)
1778 #if NUM_CORES > 1
1779 /* core is not constant here because of core switching */
1780 unsigned int core = CURRENT_CORE;
1781 unsigned int old_core = NUM_CORES;
1782 struct corelock *ocl = NULL;
1783 #else
1784 const unsigned int core = CURRENT_CORE;
1785 #endif
1786 struct thread_entry *current = cores[core].running;
1787 struct thread_entry *thread = thread_id_entry(thread_id);
1789 unsigned state;
1790 int oldlevel;
1792 if (thread == current)
1793 thread_exit(); /* Current thread - do normal exit */
1795 oldlevel = disable_irq_save();
1797 corelock_lock(&thread->waiter_cl);
1798 LOCK_THREAD(thread);
1800 state = thread->state;
1802 if (thread->id != thread_id || state == STATE_KILLED)
1803 goto thread_killed;
1805 #if NUM_CORES > 1
1806 if (thread->name == THREAD_DESTRUCT)
1808 /* Thread being killed - become a waiter */
1809 UNLOCK_THREAD(thread);
1810 corelock_unlock(&thread->waiter_cl);
1811 restore_irq(oldlevel);
1812 thread_wait(thread_id);
1813 return;
1816 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
1818 #ifdef HAVE_PRIORITY_SCHEDULING
1819 check_for_obj_waiters("remove_thread", thread);
1820 #endif
1822 if (thread->core != core)
1824 /* Switch cores and safely extract the thread there */
1825 /* Slot HAS to be unlocked or a deadlock could occur which means other
1826 * threads have to be guided into becoming thread waiters if they
1827 * attempt to remove it. */
1828 unsigned int new_core = thread->core;
1830 corelock_unlock(&thread->waiter_cl);
1832 UNLOCK_THREAD(thread);
1833 restore_irq(oldlevel);
1835 old_core = switch_core(new_core);
1837 oldlevel = disable_irq_save();
1839 corelock_lock(&thread->waiter_cl);
1840 LOCK_THREAD(thread);
1842 state = thread->state;
1843 core = new_core;
1844 /* Perform the extraction and switch ourselves back to the original
1845 processor */
1847 #endif /* NUM_CORES > 1 */
1849 if (thread->tmo.prev != NULL)
1851 /* Clean thread off the timeout list if a timeout check hasn't
1852 * run yet */
1853 remove_from_list_tmo(thread);
1856 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1857 /* Cancel CPU boost if any */
1858 boost_thread(thread, false);
1859 #endif
1861 IF_COP( retry_state: )
1863 switch (state)
1865 case STATE_RUNNING:
1866 RTR_LOCK(core);
1867 /* Remove thread from ready to run tasks */
1868 remove_from_list_l(&cores[core].running, thread);
1869 rtr_subtract_entry(core, thread->priority);
1870 RTR_UNLOCK(core);
1871 break;
1872 case STATE_BLOCKED:
1873 case STATE_BLOCKED_W_TMO:
1874 /* Remove thread from the queue it's blocked on - including its
1875 * own if waiting there */
1876 #if NUM_CORES > 1
1877 if (&thread->waiter_cl != thread->obj_cl)
1879 ocl = thread->obj_cl;
1881 if (UNLIKELY(corelock_try_lock(ocl) == 0))
1883 UNLOCK_THREAD(thread);
1884 corelock_lock(ocl);
1885 LOCK_THREAD(thread);
1887 if (UNLIKELY(thread->state != state))
1889 /* Something woke the thread */
1890 state = thread->state;
1891 corelock_unlock(ocl);
1892 goto retry_state;
1896 #endif
        remove_from_list_l(thread->bqp, thread);

#ifdef HAVE_WAKEUP_EXT_CB
        if (thread->wakeup_ext_cb != NULL)
            thread->wakeup_ext_cb(thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
        if (thread->blocker != NULL)
        {
            /* Remove thread's priority influence from its chain */
            wakeup_priority_protocol_release(thread);
        }
#endif

#if NUM_CORES > 1
        if (ocl != NULL)
            corelock_unlock(ocl);
#endif
        break;
    /* Otherwise thread is frozen and hasn't run yet */
    }

    new_thread_id(thread_id, thread);
    thread->state = STATE_KILLED;

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake(&thread->queue);
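
    /* thread->queue holds any threads blocked in thread_wait() on this one;
     * with the state now set to KILLED they can safely be released. */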
    thread->name = NULL;

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock and reenable interrupts */
    corelock_unlock(&thread->waiter_cl);
    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

#if NUM_CORES > 1
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           native core */
        switch_core(old_core);
    }
#endif
}
#endif /* ALLOW_REMOVE_THREAD */
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative base priority for the core it runs on. Any
 * needed inheritance changes may also happen.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(unsigned int thread_id, int priority)
{
    int old_base_priority = -1;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* A little safety measure */
    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
        return -1;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* Make sure it's not killed */
    if (thread_id == THREAD_ID_CURRENT ||
        (thread->id == thread_id && thread->state != STATE_KILLED))
    {
        int old_priority = thread->priority;

        old_base_priority = thread->base_priority;
        thread->base_priority = priority;

        prio_move_entry(&thread->pdist, old_base_priority, priority);
        priority = find_first_set_bit(thread->pdist.mask);
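
        /* pdist tracks every priority the thread currently holds - its base
         * priority plus anything inherited from threads blocked on objects
         * it owns. The lowest numbered bit set is the effective priority,
         * since lower numbers mean higher priority. */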
        if (old_priority == priority)
        {
            /* No priority change - do nothing */
        }
        else if (thread->state == STATE_RUNNING)
        {
            /* This thread is running - change location on the run
             * queue. No transitive inheritance needed. */
            set_running_thread_priority(thread, priority);
        }
        else
        {
            thread->priority = priority;

            if (thread->blocker != NULL)
            {
                /* Bubble new priority down the chain */
                struct blocker *bl = thread->blocker;   /* Blocker struct */
                struct thread_entry *bl_t = bl->thread; /* Blocking thread */
                struct thread_entry * const tstart = thread; /* Initial thread */
                const int highest = MIN(priority, old_priority); /* Higher of new or old */
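
                /* If even the better (numerically smaller) of the old and new
                 * priorities is worse than the blocker's current boost,
                 * nothing further down the chain can change, so the walk
                 * stops early (the "highest > bl_pr" test below). */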
                for (;;)
                {
                    struct thread_entry *next; /* Next thread to check */
                    int bl_pr;    /* Highest blocked thread */
                    int queue_pr; /* New highest blocked thread */
#if NUM_CORES > 1
                    /* Owner can change but thread cannot be dislodged - thread
                     * may not be the first in the queue which allows other
                     * threads ahead in the list to be given ownership during the
                     * operation. If thread is next then the waker will have to
                     * wait for us and the owner of the object will remain fixed.
                     * If we successfully grab the owner -- which at some point
                     * is guaranteed -- then the queue remains fixed until we
                     * pass by. */
                    for (;;)
                    {
                        LOCK_THREAD(bl_t);

                        /* Double-check the owner - retry if it changed */
                        if (LIKELY(bl->thread == bl_t))
                            break;

                        UNLOCK_THREAD(bl_t);
                        bl_t = bl->thread;
                    }
#endif
                    bl_pr = bl->priority;

                    if (highest > bl_pr)
                        break; /* Object priority won't change */

                    /* This will include the thread being set */
                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);

                    if (queue_pr == bl_pr)
                        break; /* Object priority not changing */

                    /* Update thread boost for this object */
                    bl->priority = queue_pr;
                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
                    bl_pr = find_first_set_bit(bl_t->pdist.mask);

                    if (bl_t->priority == bl_pr)
                        break; /* Blocking thread priority not changing */

                    if (bl_t->state == STATE_RUNNING)
                    {
                        /* Thread not blocked - we're done */
                        set_running_thread_priority(bl_t, bl_pr);
                        break;
                    }

                    bl_t->priority = bl_pr;
                    bl = bl_t->blocker; /* Blocking thread has a blocker? */

                    if (bl == NULL)
                        break; /* End of chain */

                    next = bl->thread;

                    if (UNLIKELY(next == tstart))
                        break; /* Full-circle */

                    UNLOCK_THREAD(thread);

                    thread = bl_t;
                    bl_t = next;
                } /* for (;;) */

                UNLOCK_THREAD(bl_t);
            }
        }
    }

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return old_base_priority;
}
/*---------------------------------------------------------------------------
 * Returns the current base priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int base_priority = thread->base_priority;

    /* Simply check without locking slot. It may or may not be valid by the
     * time the function returns anyway. If all tests pass, it is the
     * correct value for when it was valid. */
    if (thread_id != THREAD_ID_CURRENT &&
        (thread->id != thread_id || thread->state == STATE_KILLED))
        base_priority = -1;

    return base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
int thread_get_io_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    return thread->io_priority;
}

void thread_set_io_priority(unsigned int thread_id, int io_priority)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    thread->io_priority = io_priority;
}
#endif
/*---------------------------------------------------------------------------
 * Starts a frozen thread - similar semantics to wakeup_thread except that
 * the thread is on no scheduler or wakeup queue at all. It exists simply by
 * virtue of the slot having a state of STATE_FROZEN.
 *---------------------------------------------------------------------------
 */
void thread_thaw(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* If thread is the current one, it cannot be frozen, therefore
     * there is no need to check that. */
    if (thread->id == thread_id && thread->state == STATE_FROZEN)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);
}
/*---------------------------------------------------------------------------
 * Return the ID of the currently executing thread.
 *---------------------------------------------------------------------------
 */
unsigned int thread_get_current(void)
{
    return cores[CURRENT_CORE].running->id;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if (core == new_core)
    {
        /* No change - just return same core */
        return core;
    }

    int oldlevel = disable_irq_save();
    LOCK_THREAD(current);

    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - deactivate and let process complete */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        restore_irq(oldlevel);
        thread_wait(id);
        /* Should never be reached */
        THREAD_PANICF("switch_core->D:*R", current);
    }

    /* Get us off the running list for the current core */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, current);
    rtr_subtract_entry(core, current->priority);
    RTR_UNLOCK(core);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned-up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;

    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the list unlock to keep the other core stuck
     * until this thread is ready. */
    RTR_LOCK(new_core);

    rtr_add_entry(new_core, current->priority);
    add_to_list_l(&cores[new_core].running, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
    cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
    cores[core].block_task = current;
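
    /* The block ops scheduled above are not performed here - they are
     * carried out later in the switch sequence, once this thread's context
     * has been saved, which is what keeps the new core from picking the
     * thread up too early. */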
    UNLOCK_THREAD(current);

    /* Alert other core to activity */
    core_wake(new_core);

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

    /* Finally return the old core to caller */
    return current->retval;
}
#endif /* NUM_CORES > 1 */
/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;

    if (core == CPU)
    {
        /* Initialize core locks and IDs in all slots */
        int n;
        for (n = 0; n < MAXTHREADS; n++)
        {
            thread = &threads[n];
            corelock_init(&thread->waiter_cl);
            corelock_init(&thread->slot_cl);
            thread->id = THREAD_ID_INIT(n);
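            /* Slot ids are regenerated by new_thread_id() whenever a slot
             * is killed, so stale handles stop matching the slot and are
             * rejected by the id checks elsewhere in this file. */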
        }
    }

    /* CPU will initialize first and then sleep */
    thread = find_empty_thread_slot();

    if (thread == NULL)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zero'ed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }

    /* Initialize initially non-zero members of core */
    cores[core].next_tmo_check = current_tick; /* Something not in the past */

    /* Initialize initially non-zero members of slot */
    UNLOCK_THREAD(thread); /* No sync worries yet */
    thread->name = main_thread_name;
    thread->state = STATE_RUNNING;
    IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
    corelock_init(&cores[core].rtr_cl);
    thread->base_priority = PRIORITY_USER_INTERFACE;
    prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
    thread->priority = PRIORITY_USER_INTERFACE;
    rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif

    add_to_list_l(&cores[core].running, thread);

    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
        /* Wait for other processors to finish their inits since create_thread
         * isn't safe to call until the kernel inits are done. The first
         * threads created in the system must of course be created by CPU.
         * Another possible approach is to initialize all cores and slots
         * for each core by CPU, let the remainder proceed in parallel and
         * signal CPU when all are finished. */
        core_thread_init(CPU);
    }
    else
    {
        /* Initial stack is the idle stack */
        thread->stack = idle_stacks[core];
        thread->stack_size = IDLE_STACK_SIZE;
        /* After last processor completes, it should signal all others to
         * proceed or may signal the next and call thread_exit(). The last one
         * to finish will signal CPU. */
        core_thread_init(core);
        /* Other cores do not have a main thread - go idle inside switch_thread
         * until a thread can run on the core. */
        thread_exit();
#endif /* NUM_CORES */
    }
}
/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES == 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    unsigned int i;
    int usage = 0;
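
    /* Stacks are pre-filled with the DEADBEEF pattern when a thread is set
     * up, so the first word that no longer holds the pattern marks the
     * deepest the stack has ever grown; report that depth as a percentage. */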
    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 *       overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    if (LIKELY(thread->stack_size > 0))
        return stack_usage(thread->stack, thread->stack_size);
    return 0;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state, a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";
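
        /* No usable name (or the slot is being destroyed): fall back to
         * printing the slot address as an eight-digit hexadecimal ID */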
        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX";
        }

        snprintf(buffer, size, fmt, name);