1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Ulf Ralberg
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
21 #include "config.h"
22 #include <stdbool.h>
23 #include <stdio.h>
24 #include "thread.h"
25 #include "panic.h"
26 #include "system.h"
27 #include "kernel.h"
28 #include "cpu.h"
29 #include "string.h"
30 #ifdef RB_PROFILE
31 #include <profile.h>
32 #endif
33 /****************************************************************************
34 * ATTENTION!! *
35 * See notes below on implementing processor-specific portions! *
36 ***************************************************************************/
38 /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
39 #ifdef DEBUG
40 #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
41 #else
42 #define THREAD_EXTRA_CHECKS 0
43 #endif
45 /**
 46  * General locking order to guarantee progress. The order must be observed, but
 47  * not all stages are necessarily obligatory. Going from 1) to 3) is
48 * perfectly legal.
50 * 1) IRQ
 51  * This is first because of the likelihood of having an interrupt occur that
52 * also accesses one of the objects farther down the list. Any non-blocking
53 * synchronization done may already have a lock on something during normal
54 * execution and if an interrupt handler running on the same processor as
55 * the one that has the resource locked were to attempt to access the
 56  * resource, the interrupt handler would spin forever waiting for an unlock
57 * that will never happen. There is no danger if the interrupt occurs on
58 * a different processor because the one that has the lock will eventually
59 * unlock and the other processor's handler may proceed at that time. Not
 60  * necessary when the resource in question is definitely not available to
61 * interrupt handlers.
63 * 2) Kernel Object
64 * 1) May be needed beforehand if the kernel object allows dual-use such as
65 * event queues. The kernel object must have a scheme to protect itself from
66 * access by another processor and is responsible for serializing the calls
67 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
68 * other. Objects' queues are also protected here.
70 * 3) Thread Slot
71 * This locks access to the thread's slot such that its state cannot be
72 * altered by another processor when a state change is in progress such as
73 * when it is in the process of going on a blocked list. An attempt to wake
74 * a thread while it is still blocking will likely desync its state with
75 * the other resources used for that state.
77 * 4) Core Lists
78 * These lists are specific to a particular processor core and are accessible
79 * by all processor cores and interrupt handlers. The running (rtr) list is
80 * the prime example where a thread may be added by any means.
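/* Illustrative sketch only (not a real code path): the full order, as it
 * appears when a thread blocks on a kernel object on multicore, is roughly
 *
 *     disable_irq();                              // 1) IRQ
 *     corelock_lock(&object_corelock);            // 2) kernel object
 *     LOCK_THREAD(current);                       // 3) thread slot
 *     block_thread_on_l(current, STATE_BLOCKED);  // 4) takes the core's
 *                                                 //    RTR list lock inside
 *
 * where "object_corelock" stands for whatever corelock the particular kernel
 * object uses. Stages may be skipped when not applicable, but the relative
 * order is never reversed. */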
83 /*---------------------------------------------------------------------------
84 * Processor specific: core_sleep/core_wake/misc. notes
86 * ARM notes:
87 * FIQ is not dealt with by the scheduler code and is simply restored if it
88 * must by masked for some reason - because threading modifies a register
89 * that FIQ may also modify and there's no way to accomplish it atomically.
90 * s3c2440 is such a case.
 92  * Audio interrupts are generally treated at a higher priority than others.
 93  * Usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 94  * is not in general safe. Special cases may be constructed on a per-
95 * source basis and blocking operations are not available.
 97  * core_sleep procedure to implement for any CPU to ensure an asynchronous
 98  * wakeup never results in requiring a wait until the next tick (up to
99 * 10000uS!). May require assembly and careful instruction ordering.
101 * 1) On multicore, stay awake if directed to do so by another. If so, goto
102 * step 4.
103 * 2) If processor requires, atomically reenable interrupts and perform step
104 * 3.
105 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
106 * on Coldfire) goto step 5.
107 * 4) Enable interrupts.
108 * 5) Exit procedure.
110  * core_wake and multiprocessor notes for sleep/wake coordination:
111 * If possible, to wake up another processor, the forcing of an interrupt on
112 * the woken core by the waker core is the easiest way to ensure a non-
113 * delayed wake and immediate execution of any woken threads. If that isn't
114  * available then some careful non-blocking synchronization is needed (as on
115 * PP targets at the moment).
116 *---------------------------------------------------------------------------
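/* A minimal, illustrative core_sleep() shape for a simple single-core port,
 * following steps 1-5 above; "cpu_halt_and_enable_irq()" is a made-up name
 * for whatever CPU-specific (often assembly) sequence atomically re-enables
 * interrupts and halts the core:
 *
 *     static inline void core_sleep(void)
 *     {
 *         cpu_halt_and_enable_irq();   // steps 2)-3): sleep, woken by IRQ
 *         enable_irq();                // step 4): harmless if already enabled
 *     }
 *
 * The real implementations live in the thread-*.c files included below. */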
119 /* Cast to the machine pointer size, whose size could be < 4 or > 32
120 * (someday :). */
121 #define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
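/* Stacks are filled with DEADBEEF in create_thread(); switch_thread() then
 * checks stack[0] (the lowest word) and calls thread_stkov() if it has been
 * overwritten - a cheap single-word stack-overflow canary. */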
122 static struct core_entry cores[NUM_CORES] IBSS_ATTR;
123 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
125 static const char main_thread_name[] = "main";
126 extern uintptr_t stackbegin[];
127 extern uintptr_t stackend[];
129 static inline void core_sleep(IF_COP_VOID(unsigned int core))
130 __attribute__((always_inline));
132 void check_tmo_threads(void)
133 __attribute__((noinline));
135 static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
136 __attribute__((always_inline));
138 static void add_to_list_tmo(struct thread_entry *thread)
139 __attribute__((noinline));
141 static void core_schedule_wakeup(struct thread_entry *thread)
142 __attribute__((noinline));
144 #if NUM_CORES > 1
145 static inline void run_blocking_ops(
146 unsigned int core, struct thread_entry *thread)
147 __attribute__((always_inline));
148 #endif
150 static void thread_stkov(struct thread_entry *thread)
151 __attribute__((noinline));
153 static inline void store_context(void* addr)
154 __attribute__((always_inline));
156 static inline void load_context(const void* addr)
157 __attribute__((always_inline));
159 #if NUM_CORES > 1
160 static void thread_final_exit_do(struct thread_entry *current)
161 __attribute__((noinline, noreturn, used));
162 #else
163 static inline void thread_final_exit(struct thread_entry *current)
164 __attribute__((always_inline, noreturn));
165 #endif
167 void switch_thread(void)
168 __attribute__((noinline));
170 /****************************************************************************
171 * Processor-specific section - include necessary core support
173 #if defined(CPU_ARM)
174 #include "thread-arm.c"
175 #if defined (CPU_PP)
176 #include "thread-pp.c"
177 #endif /* CPU_PP */
178 #elif defined(CPU_COLDFIRE)
179 #include "thread-coldfire.c"
180 #elif CONFIG_CPU == SH7034
181 #include "thread-sh.c"
182 #elif defined(CPU_MIPS) && CPU_MIPS == 32
183 #include "thread-mips32.c"
184 #else
185 /* Wouldn't compile anyway */
186 #error Processor not implemented.
187 #endif /* CONFIG_CPU == */
189 #ifndef IF_NO_SKIP_YIELD
190 #define IF_NO_SKIP_YIELD(...)
191 #endif
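/* When no port-specific definition was provided above, IF_NO_SKIP_YIELD()
 * expands to nothing; where a port defines it, it emits its argument and
 * enables the "skip_count == -1 means never skip this thread" special case
 * used in switch_thread() and wakeup_thread(). */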
194 * End Processor-specific section
195 ***************************************************************************/
197 #if THREAD_EXTRA_CHECKS
198 static void thread_panicf(const char *msg, struct thread_entry *thread)
200 IF_COP( const unsigned int core = thread->core; )
201 static char name[32];
202 thread_get_name(name, 32, thread);
203 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
205 static void thread_stkov(struct thread_entry *thread)
207 thread_panicf("Stkov", thread);
209 #define THREAD_PANICF(msg, thread) \
210 thread_panicf(msg, thread)
211 #define THREAD_ASSERT(exp, msg, thread) \
212 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
213 #else
214 static void thread_stkov(struct thread_entry *thread)
216 IF_COP( const unsigned int core = thread->core; )
217 static char name[32];
218 thread_get_name(name, 32, thread);
219 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
221 #define THREAD_PANICF(msg, thread)
222 #define THREAD_ASSERT(exp, msg, thread)
223 #endif /* THREAD_EXTRA_CHECKS */
225 /* Thread locking */
226 #if NUM_CORES > 1
227 #define LOCK_THREAD(thread) \
228 ({ corelock_lock(&(thread)->slot_cl); })
229 #define TRY_LOCK_THREAD(thread) \
230 ({ corelock_try_lock(&(thread)->slot_cl); })
231 #define UNLOCK_THREAD(thread) \
232 ({ corelock_unlock(&(thread)->slot_cl); })
233 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
234 ({ unsigned int _core = (thread)->core; \
235 cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
236 cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
237 #else
238 #define LOCK_THREAD(thread) \
239 ({ })
240 #define TRY_LOCK_THREAD(thread) \
241 ({ })
242 #define UNLOCK_THREAD(thread) \
243 ({ })
244 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
245 ({ })
246 #endif
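/* Note: UNLOCK_THREAD_AT_TASK_SWITCH does not unlock immediately - it queues
 * a TBOP_UNLOCK_CORELOCK request in the core's blk_ops so the slot corelock
 * is released by run_blocking_ops() during the next switch_thread(), after
 * the outgoing thread's context has been saved. */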
248 /* RTR list */
249 #define RTR_LOCK(core) \
250 ({ corelock_lock(&cores[core].rtr_cl); })
251 #define RTR_UNLOCK(core) \
252 ({ corelock_unlock(&cores[core].rtr_cl); })
254 #ifdef HAVE_PRIORITY_SCHEDULING
255 #define rtr_add_entry(core, priority) \
256 prio_add_entry(&cores[core].rtr, (priority))
258 #define rtr_subtract_entry(core, priority) \
259 prio_subtract_entry(&cores[core].rtr, (priority))
261 #define rtr_move_entry(core, from, to) \
262 prio_move_entry(&cores[core].rtr, (from), (to))
263 #else
264 #define rtr_add_entry(core, priority)
265 #define rtr_add_entry_inl(core, priority)
266 #define rtr_subtract_entry(core, priority)
267 #define rtr_subtract_entry_inl(core, priority)
268 #define rtr_move_entry(core, from, to)
269 #define rtr_move_entry_inl(core, from, to)
270 #endif
272 /*---------------------------------------------------------------------------
273 * Thread list structure - circular:
274 * +------------------------------+
275 * | |
276 * +--+---+<-+---+<-+---+<-+---+<-+
277 * Head->| T | | T | | T | | T |
278 * +->+---+->+---+->+---+->+---+--+
279 * | |
280 * +------------------------------+
281 *---------------------------------------------------------------------------
284 /*---------------------------------------------------------------------------
285 * Adds a thread to a list of threads using "insert last". Uses the "l"
286 * links.
287 *---------------------------------------------------------------------------
289 static void add_to_list_l(struct thread_entry **list,
290 struct thread_entry *thread)
292 struct thread_entry *l = *list;
294 if (l == NULL)
296 /* Insert into unoccupied list */
297 thread->l.prev = thread;
298 thread->l.next = thread;
299 *list = thread;
300 return;
303 /* Insert last */
304 thread->l.prev = l->l.prev;
305 thread->l.next = l;
306 l->l.prev->l.next = thread;
307 l->l.prev = thread;
310 /*---------------------------------------------------------------------------
311 * Removes a thread from a list of threads. Uses the "l" links.
312 *---------------------------------------------------------------------------
314 static void remove_from_list_l(struct thread_entry **list,
315 struct thread_entry *thread)
317 struct thread_entry *prev, *next;
319 next = thread->l.next;
321 if (thread == next)
323 /* The only item */
324 *list = NULL;
325 return;
328 if (thread == *list)
330 /* List becomes next item */
331 *list = next;
334 prev = thread->l.prev;
336 /* Fix links to jump over the removed entry. */
337 next->l.prev = prev;
338 prev->l.next = next;
341 /*---------------------------------------------------------------------------
342 * Timeout list structure - circular reverse (to make "remove item" O(1)),
343 * NULL-terminated forward (to ease the far more common forward traversal):
344 * +------------------------------+
345 * | |
346 * +--+---+<-+---+<-+---+<-+---+<-+
347 * Head->| T | | T | | T | | T |
348 * +---+->+---+->+---+->+---+-X
349 *---------------------------------------------------------------------------
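/* A consequence of this layout: tmo.prev is never NULL while a thread is on
 * the list (it points back to itself when the thread is the only entry), so
 * "tmo.prev == NULL" doubles as the "not on a timeout list" test used by
 * add_to_list_tmo()/remove_from_list_tmo() and their callers. */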
352 /*---------------------------------------------------------------------------
353  * Add a thread to the core's timeout list by linking the pointers in its
354 * tmo structure.
355 *---------------------------------------------------------------------------
357 static void add_to_list_tmo(struct thread_entry *thread)
359 struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
360 THREAD_ASSERT(thread->tmo.prev == NULL,
361 "add_to_list_tmo->already listed", thread);
363 thread->tmo.next = NULL;
365 if (tmo == NULL)
367 /* Insert into unoccupied list */
368 thread->tmo.prev = thread;
369 cores[IF_COP_CORE(thread->core)].timeout = thread;
370 return;
373 /* Insert Last */
374 thread->tmo.prev = tmo->tmo.prev;
375 tmo->tmo.prev->tmo.next = thread;
376 tmo->tmo.prev = thread;
379 /*---------------------------------------------------------------------------
380  * Remove a thread from the core's timeout list by unlinking the pointers in
381 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
382 * is cancelled.
383 *---------------------------------------------------------------------------
385 static void remove_from_list_tmo(struct thread_entry *thread)
387 struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
388 struct thread_entry *prev = thread->tmo.prev;
389 struct thread_entry *next = thread->tmo.next;
391 THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
393 if (next != NULL)
394 next->tmo.prev = prev;
396 if (thread == *list)
398 /* List becomes next item and empty if next == NULL */
399 *list = next;
400 /* Mark as unlisted */
401 thread->tmo.prev = NULL;
403 else
405 if (next == NULL)
406 (*list)->tmo.prev = prev;
407 prev->tmo.next = next;
408 /* Mark as unlisted */
409 thread->tmo.prev = NULL;
414 #ifdef HAVE_PRIORITY_SCHEDULING
415 /*---------------------------------------------------------------------------
416 * Priority distribution structure (one category for each possible priority):
418 * +----+----+----+ ... +-----+
419 * hist: | F0 | F1 | F2 | | F31 |
420 * +----+----+----+ ... +-----+
421 * mask: | b0 | b1 | b2 | | b31 |
422 * +----+----+----+ ... +-----+
424 * F = count of threads at priority category n (frequency)
425 * b = bitmask of non-zero priority categories (occupancy)
427  *        / if F[n] != 0 : 1
428  * b[n] = |
429  *        \ else         : 0
431 *---------------------------------------------------------------------------
432  * Basic priority inheritance protocol (PIP):
434 * Mn = mutex n, Tn = thread n
436 * A lower priority thread inherits the priority of the highest priority
437 * thread blocked waiting for it to complete an action (such as release a
438 * mutex or respond to a message via queue_send):
440 * 1) T2->M1->T1
442  * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
443  * priority than T1, then T1 inherits the priority of T2.
445 * 2) T3
446 * \/
447 * T2->M1->T1
449 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
450  * T1 inherits the higher priority of T2 and T3.
452 * 3) T3->M2->T2->M1->T1
454 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
455 * then T1 inherits the priority of T3 through T2.
457 * Blocking chains can grow arbitrarily complex (though it's best that they
458  * not form at all very often :) and build up from these units.
459 *---------------------------------------------------------------------------
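/* Worked example of the distribution above (illustrative numbers): with
 * registered priorities 1, 5 and 5, hist[1] == 1, hist[5] == 2 and all other
 * entries are 0, so mask == (1 << 1) | (1 << 5) == 0x22.
 * find_first_set_bit(mask) then returns 1 - the numerically lowest, i.e.
 * highest, priority currently present. */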
462 /*---------------------------------------------------------------------------
463 * Increment frequency at category "priority"
464 *---------------------------------------------------------------------------
466 static inline unsigned int prio_add_entry(
467 struct priority_distribution *pd, int priority)
469 unsigned int count;
470 /* Enough size/instruction count difference for ARM makes it worth it to
471 * use different code (192 bytes for ARM). Only thing better is ASM. */
472 #ifdef CPU_ARM
473 count = pd->hist[priority];
474 if (++count == 1)
475 pd->mask |= 1 << priority;
476 pd->hist[priority] = count;
477 #else /* This one's better for Coldfire */
478 if ((count = ++pd->hist[priority]) == 1)
479 pd->mask |= 1 << priority;
480 #endif
482 return count;
485 /*---------------------------------------------------------------------------
486 * Decrement frequency at category "priority"
487 *---------------------------------------------------------------------------
489 static inline unsigned int prio_subtract_entry(
490 struct priority_distribution *pd, int priority)
492 unsigned int count;
494 #ifdef CPU_ARM
495 count = pd->hist[priority];
496 if (--count == 0)
497 pd->mask &= ~(1 << priority);
498 pd->hist[priority] = count;
499 #else
500 if ((count = --pd->hist[priority]) == 0)
501 pd->mask &= ~(1 << priority);
502 #endif
504 return count;
507 /*---------------------------------------------------------------------------
508 * Remove from one category and add to another
509 *---------------------------------------------------------------------------
511 static inline void prio_move_entry(
512 struct priority_distribution *pd, int from, int to)
514 uint32_t mask = pd->mask;
516 #ifdef CPU_ARM
517 unsigned int count;
519 count = pd->hist[from];
520 if (--count == 0)
521 mask &= ~(1 << from);
522 pd->hist[from] = count;
524 count = pd->hist[to];
525 if (++count == 1)
526 mask |= 1 << to;
527 pd->hist[to] = count;
528 #else
529 if (--pd->hist[from] == 0)
530 mask &= ~(1 << from);
532 if (++pd->hist[to] == 1)
533 mask |= 1 << to;
534 #endif
536 pd->mask = mask;
539 /*---------------------------------------------------------------------------
540 * Change the priority and rtr entry for a running thread
541 *---------------------------------------------------------------------------
543 static inline void set_running_thread_priority(
544 struct thread_entry *thread, int priority)
546 const unsigned int core = IF_COP_CORE(thread->core);
547 RTR_LOCK(core);
548 rtr_move_entry(core, thread->priority, priority);
549 thread->priority = priority;
550 RTR_UNLOCK(core);
553 /*---------------------------------------------------------------------------
554 * Finds the highest priority thread in a list of threads. If the list is
555  * empty, PRIORITY_IDLE is returned.
557 * It is possible to use the struct priority_distribution within an object
558 * instead of scanning the remaining threads in the list but as a compromise,
559 * the resulting per-object memory overhead is saved at a slight speed
560 * penalty under high contention.
561 *---------------------------------------------------------------------------
563 static int find_highest_priority_in_list_l(
564 struct thread_entry * const thread)
566 if (LIKELY(thread != NULL))
568         /* Go through the list until ending up back at the initial thread */
569 int highest_priority = thread->priority;
570 struct thread_entry *curr = thread;
574 int priority = curr->priority;
576 if (priority < highest_priority)
577 highest_priority = priority;
579 curr = curr->l.next;
581 while (curr != thread);
583 return highest_priority;
586 return PRIORITY_IDLE;
589 /*---------------------------------------------------------------------------
590  * Register priority with the blocking system and bubble it down the chain,
591  * if any, until we reach the end or something already equal or higher.
593 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
594 * targets but that same action also guarantees a circular block anyway and
595 * those are prevented, right? :-)
596 *---------------------------------------------------------------------------
598 static struct thread_entry *
599 blocker_inherit_priority(struct thread_entry *current)
601 const int priority = current->priority;
602 struct blocker *bl = current->blocker;
603 struct thread_entry * const tstart = current;
604 struct thread_entry *bl_t = bl->thread;
606 /* Blocker cannot change since the object protection is held */
607 LOCK_THREAD(bl_t);
609 for (;;)
611 struct thread_entry *next;
612 int bl_pr = bl->priority;
614 if (priority >= bl_pr)
615 break; /* Object priority already high enough */
617 bl->priority = priority;
619 /* Add this one */
620 prio_add_entry(&bl_t->pdist, priority);
622 if (bl_pr < PRIORITY_IDLE)
624 /* Not first waiter - subtract old one */
625 prio_subtract_entry(&bl_t->pdist, bl_pr);
628 if (priority >= bl_t->priority)
629 break; /* Thread priority high enough */
631 if (bl_t->state == STATE_RUNNING)
633 /* Blocking thread is a running thread therefore there are no
634 * further blockers. Change the "run queue" on which it
635 * resides. */
636 set_running_thread_priority(bl_t, priority);
637 break;
640 bl_t->priority = priority;
642 /* If blocking thread has a blocker, apply transitive inheritance */
643 bl = bl_t->blocker;
645 if (bl == NULL)
646 break; /* End of chain or object doesn't support inheritance */
648 next = bl->thread;
650 if (UNLIKELY(next == tstart))
651 break; /* Full-circle - deadlock! */
653 UNLOCK_THREAD(current);
655 #if NUM_CORES > 1
656 for (;;)
658 LOCK_THREAD(next);
660 /* Blocker could change - retest condition */
661 if (LIKELY(bl->thread == next))
662 break;
664 UNLOCK_THREAD(next);
665 next = bl->thread;
667 #endif
668 current = bl_t;
669 bl_t = next;
672 UNLOCK_THREAD(bl_t);
674 return current;
677 /*---------------------------------------------------------------------------
678  * Readjust priorities when waking a thread blocked waiting for another,
679  * in essence "releasing" the thread's effect on the object owner. Can be
680 * performed from any context.
681 *---------------------------------------------------------------------------
683 struct thread_entry *
684 wakeup_priority_protocol_release(struct thread_entry *thread)
686 const int priority = thread->priority;
687 struct blocker *bl = thread->blocker;
688 struct thread_entry * const tstart = thread;
689 struct thread_entry *bl_t = bl->thread;
691 /* Blocker cannot change since object will be locked */
692 LOCK_THREAD(bl_t);
694 thread->blocker = NULL; /* Thread not blocked */
696 for (;;)
698 struct thread_entry *next;
699 int bl_pr = bl->priority;
701 if (priority > bl_pr)
702 break; /* Object priority higher */
704 next = *thread->bqp;
706 if (next == NULL)
708 /* No more threads in queue */
709 prio_subtract_entry(&bl_t->pdist, bl_pr);
710 bl->priority = PRIORITY_IDLE;
712 else
714 /* Check list for highest remaining priority */
715 int queue_pr = find_highest_priority_in_list_l(next);
717 if (queue_pr == bl_pr)
718 break; /* Object priority not changing */
720 /* Change queue priority */
721 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
722 bl->priority = queue_pr;
725 if (bl_pr > bl_t->priority)
726 break; /* thread priority is higher */
728 bl_pr = find_first_set_bit(bl_t->pdist.mask);
730 if (bl_pr == bl_t->priority)
731 break; /* Thread priority not changing */
733 if (bl_t->state == STATE_RUNNING)
735 /* No further blockers */
736 set_running_thread_priority(bl_t, bl_pr);
737 break;
740 bl_t->priority = bl_pr;
742 /* If blocking thread has a blocker, apply transitive inheritance */
743 bl = bl_t->blocker;
745 if (bl == NULL)
746 break; /* End of chain or object doesn't support inheritance */
748 next = bl->thread;
750 if (UNLIKELY(next == tstart))
751 break; /* Full-circle - deadlock! */
753 UNLOCK_THREAD(thread);
755 #if NUM_CORES > 1
756 for (;;)
758 LOCK_THREAD(next);
760 /* Blocker could change - retest condition */
761 if (LIKELY(bl->thread == next))
762 break;
764 UNLOCK_THREAD(next);
765 next = bl->thread;
767 #endif
768 thread = bl_t;
769 bl_t = next;
772 UNLOCK_THREAD(bl_t);
774 #if NUM_CORES > 1
775 if (UNLIKELY(thread != tstart))
777 /* Relock original if it changed */
778 LOCK_THREAD(tstart);
780 #endif
782 return cores[CURRENT_CORE].running;
785 /*---------------------------------------------------------------------------
786  * Transfer ownership to a thread waiting for an object and transfer
787 * inherited priority boost from other waiters. This algorithm knows that
788 * blocking chains may only unblock from the very end.
790 * Only the owning thread itself may call this and so the assumption that
791 * it is the running thread is made.
792 *---------------------------------------------------------------------------
794 struct thread_entry *
795 wakeup_priority_protocol_transfer(struct thread_entry *thread)
797 /* Waking thread inherits priority boost from object owner */
798 struct blocker *bl = thread->blocker;
799 struct thread_entry *bl_t = bl->thread;
800 struct thread_entry *next;
801 int bl_pr;
803 THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
804 "UPPT->wrong thread", cores[CURRENT_CORE].running);
806 LOCK_THREAD(bl_t);
808 bl_pr = bl->priority;
810 /* Remove the object's boost from the owning thread */
811 if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
812 bl_pr <= bl_t->priority)
814 /* No more threads at this priority are waiting and the old level is
815 * at least the thread level */
816 int priority = find_first_set_bit(bl_t->pdist.mask);
818 if (priority != bl_t->priority)
820 /* Adjust this thread's priority */
821 set_running_thread_priority(bl_t, priority);
825 next = *thread->bqp;
827 if (LIKELY(next == NULL))
829 /* Expected shortcut - no more waiters */
830 bl_pr = PRIORITY_IDLE;
832 else
834 if (thread->priority <= bl_pr)
836 /* Need to scan threads remaining in queue */
837 bl_pr = find_highest_priority_in_list_l(next);
840 if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
841 bl_pr < thread->priority)
843 /* Thread priority must be raised */
844 thread->priority = bl_pr;
848 bl->thread = thread; /* This thread pwns */
849 bl->priority = bl_pr; /* Save highest blocked priority */
850 thread->blocker = NULL; /* Thread not blocked */
852 UNLOCK_THREAD(bl_t);
854 return bl_t;
857 /*---------------------------------------------------------------------------
858  * No threads may be blocked waiting on this thread except those waiting for it to exit.
859 * The alternative is more elaborate cleanup and object registration code.
860 * Check this for risk of silent data corruption when objects with
861 * inheritable blocking are abandoned by the owner - not precise but may
862 * catch something.
863 *---------------------------------------------------------------------------
865 static void __attribute__((noinline)) check_for_obj_waiters(
866 const char *function, struct thread_entry *thread)
868     /* Only one bit in the mask should be set with a frequency of 1, which
869 * represents the thread's own base priority */
870 uint32_t mask = thread->pdist.mask;
871 if ((mask & (mask - 1)) != 0 ||
872 thread->pdist.hist[find_first_set_bit(mask)] > 1)
874 unsigned char name[32];
875 thread_get_name(name, 32, thread);
876 panicf("%s->%s with obj. waiters", function, name);
879 #endif /* HAVE_PRIORITY_SCHEDULING */
881 /*---------------------------------------------------------------------------
882 * Move a thread back to a running state on its core.
883 *---------------------------------------------------------------------------
885 static void core_schedule_wakeup(struct thread_entry *thread)
887 const unsigned int core = IF_COP_CORE(thread->core);
889 RTR_LOCK(core);
891 thread->state = STATE_RUNNING;
893 add_to_list_l(&cores[core].running, thread);
894 rtr_add_entry(core, thread->priority);
896 RTR_UNLOCK(core);
898 #if NUM_CORES > 1
899 if (core != CURRENT_CORE)
900 core_wake(core);
901 #endif
904 /*---------------------------------------------------------------------------
905 * Check the core's timeout list when at least one thread is due to wake.
906 * Filtering for the condition is done before making the call. Resets the
907 * tick when the next check will occur.
908 *---------------------------------------------------------------------------
910 void check_tmo_threads(void)
912 const unsigned int core = CURRENT_CORE;
913 const long tick = current_tick; /* snapshot the current tick */
914 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
915 struct thread_entry *next = cores[core].timeout;
917 /* If there are no processes waiting for a timeout, just keep the check
918 tick from falling into the past. */
920 /* Break the loop once we have walked through the list of all
921 * sleeping processes or have removed them all. */
922 while (next != NULL)
924 /* Check sleeping threads. Allow interrupts between checks. */
925 enable_irq();
927 struct thread_entry *curr = next;
929 next = curr->tmo.next;
931 /* Lock thread slot against explicit wakeup */
932 disable_irq();
933 LOCK_THREAD(curr);
935 unsigned state = curr->state;
937 if (state < TIMEOUT_STATE_FIRST)
939 /* Cleanup threads no longer on a timeout but still on the
940 * list. */
941 remove_from_list_tmo(curr);
943 else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
945 /* Timeout still pending - this will be the usual case */
946 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
948 /* Earliest timeout found so far - move the next check up
949 to its time */
950 next_tmo_check = curr->tmo_tick;
953 else
955 /* Sleep timeout has been reached so bring the thread back to
956 * life again. */
957 if (state == STATE_BLOCKED_W_TMO)
959 #if NUM_CORES > 1
960 /* Lock the waiting thread's kernel object */
961 struct corelock *ocl = curr->obj_cl;
963 if (UNLIKELY(corelock_try_lock(ocl) == 0))
965 /* Need to retry in the correct order though the need is
966 * unlikely */
967 UNLOCK_THREAD(curr);
968 corelock_lock(ocl);
969 LOCK_THREAD(curr);
971 if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
973                 /* Thread was woken or removed explicitly while slot
974 * was unlocked */
975 corelock_unlock(ocl);
976 remove_from_list_tmo(curr);
977 UNLOCK_THREAD(curr);
978 continue;
981 #endif /* NUM_CORES */
983 remove_from_list_l(curr->bqp, curr);
985 #ifdef HAVE_WAKEUP_EXT_CB
986 if (curr->wakeup_ext_cb != NULL)
987 curr->wakeup_ext_cb(curr);
988 #endif
990 #ifdef HAVE_PRIORITY_SCHEDULING
991 if (curr->blocker != NULL)
992 wakeup_priority_protocol_release(curr);
993 #endif
994 corelock_unlock(ocl);
996 /* else state == STATE_SLEEPING */
998 remove_from_list_tmo(curr);
1000 RTR_LOCK(core);
1002 curr->state = STATE_RUNNING;
1004 add_to_list_l(&cores[core].running, curr);
1005 rtr_add_entry(core, curr->priority);
1007 RTR_UNLOCK(core);
1010 UNLOCK_THREAD(curr);
1013 cores[core].next_tmo_check = next_tmo_check;
1016 /*---------------------------------------------------------------------------
1017 * Performs operations that must be done before blocking a thread but after
1018 * the state is saved.
1019 *---------------------------------------------------------------------------
1021 #if NUM_CORES > 1
1022 static inline void run_blocking_ops(
1023 unsigned int core, struct thread_entry *thread)
1025 struct thread_blk_ops *ops = &cores[core].blk_ops;
1026 const unsigned flags = ops->flags;
1028 if (LIKELY(flags == TBOP_CLEAR))
1029 return;
1031 switch (flags)
1033 case TBOP_SWITCH_CORE:
1034 core_switch_blk_op(core, thread);
1035 /* Fall-through */
1036 case TBOP_UNLOCK_CORELOCK:
1037 corelock_unlock(ops->cl_p);
1038 break;
1041 ops->flags = TBOP_CLEAR;
1043 #endif /* NUM_CORES > 1 */
1045 #ifdef RB_PROFILE
1046 void profile_thread(void)
1048 profstart(cores[CURRENT_CORE].running - threads);
1050 #endif
1052 /*---------------------------------------------------------------------------
1053 * Prepares a thread to block on an object's list and/or for a specified
1054 * duration - expects object and slot to be appropriately locked if needed
1055 * and interrupts to be masked.
1056 *---------------------------------------------------------------------------
1058 static inline void block_thread_on_l(struct thread_entry *thread,
1059 unsigned state)
1061 /* If inlined, unreachable branches will be pruned with no size penalty
1062 because state is passed as a constant parameter. */
1063 const unsigned int core = IF_COP_CORE(thread->core);
1065 /* Remove the thread from the list of running threads. */
1066 RTR_LOCK(core);
1067 remove_from_list_l(&cores[core].running, thread);
1068 rtr_subtract_entry(core, thread->priority);
1069 RTR_UNLOCK(core);
1071 /* Add a timeout to the block if not infinite */
1072 switch (state)
1074 case STATE_BLOCKED:
1075 case STATE_BLOCKED_W_TMO:
1076 /* Put the thread into a new list of inactive threads. */
1077 add_to_list_l(thread->bqp, thread);
1079 if (state == STATE_BLOCKED)
1080 break;
1082 /* Fall-through */
1083 case STATE_SLEEPING:
1084 /* If this thread times out sooner than any other thread, update
1085 next_tmo_check to its timeout */
1086 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1088 cores[core].next_tmo_check = thread->tmo_tick;
1091 if (thread->tmo.prev == NULL)
1093 add_to_list_tmo(thread);
1095 /* else thread was never removed from list - just keep it there */
1096 break;
1099     /* Remember the next thread about to block. */
1100 cores[core].block_task = thread;
1102 /* Report new state. */
1103 thread->state = state;
1106 /*---------------------------------------------------------------------------
1107  * Switch thread in round-robin fashion for any given priority. Any thread
1108  * that removed itself from the running list first must have registered
1109  * itself as the core's block task beforehand.
1111 * INTERNAL: Intended for use by kernel and not for programs.
1112 *---------------------------------------------------------------------------
1114 void switch_thread(void)
1117 const unsigned int core = CURRENT_CORE;
1118 struct thread_entry *block = cores[core].block_task;
1119 struct thread_entry *thread = cores[core].running;
1121 /* Get context to save - next thread to run is unknown until all wakeups
1122 * are evaluated */
1123 if (block != NULL)
1125 cores[core].block_task = NULL;
1127 #if NUM_CORES > 1
1128 if (UNLIKELY(thread == block))
1130 /* This was the last thread running and another core woke us before
1131 * reaching here. Force next thread selection to give tmo threads or
1132 * other threads woken before this block a first chance. */
1133 block = NULL;
1135 else
1136 #endif
1138 /* Blocking task is the old one */
1139 thread = block;
1143 #ifdef RB_PROFILE
1144 profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
1145 #endif
1147 /* Begin task switching by saving our current context so that we can
1148 * restore the state of the current thread later to the point prior
1149 * to this call. */
1150 store_context(&thread->context);
1152     /* Check if the current thread stack has overflowed */
1153 if (UNLIKELY(thread->stack[0] != DEADBEEF))
1154 thread_stkov(thread);
1156 #if NUM_CORES > 1
1157 /* Run any blocking operations requested before switching/sleeping */
1158 run_blocking_ops(core, thread);
1159 #endif
1161 #ifdef HAVE_PRIORITY_SCHEDULING
1162 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1163 /* Reset the value of thread's skip count */
1164 thread->skip_count = 0;
1165 #endif
1167 for (;;)
1169 /* If there are threads on a timeout and the earliest wakeup is due,
1170 * check the list and wake any threads that need to start running
1171 * again. */
1172 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
1174 check_tmo_threads();
1177 disable_irq();
1178 RTR_LOCK(core);
1180 thread = cores[core].running;
1182 if (UNLIKELY(thread == NULL))
1184 /* Enter sleep mode to reduce power usage - woken up on interrupt
1185 * or wakeup request from another core - expected to enable
1186 * interrupts. */
1187 RTR_UNLOCK(core);
1188 core_sleep(IF_COP(core));
1190 else
1192 #ifdef HAVE_PRIORITY_SCHEDULING
1193 /* Select the new task based on priorities and the last time a
1194 * process got CPU time relative to the highest priority runnable
1195 * task. */
1196 struct priority_distribution *pd = &cores[core].rtr;
1197 int max = find_first_set_bit(pd->mask);
1199 if (block == NULL)
1201 /* Not switching on a block, tentatively select next thread */
1202 thread = thread->l.next;
1205 for (;;)
1207 int priority = thread->priority;
1208 int diff;
1210 /* This ridiculously simple method of aging seems to work
1211 * suspiciously well. It does tend to reward CPU hogs (under
1212 * yielding) but that's generally not desirable at all. On
1213 * the plus side, it, relatively to other threads, penalizes
1214 * excess yielding which is good if some high priority thread
1215 * is performing no useful work such as polling for a device
1216 * to be ready. Of course, aging is only employed when higher
1217 * and lower priority threads are runnable. The highest
1218 * priority runnable thread(s) are never skipped unless a
1219 * lower-priority process has aged sufficiently. Priorities
1220 * of REALTIME class are run strictly according to priority
1221 * thus are not subject to switchout due to lower-priority
1222 * processes aging; they must give up the processor by going
1223 * off the run list. */
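            /* Illustrative numbers (assuming ordinary, non-REALTIME
             * priorities): a runnable thread at priority 20 competing
             * against max == 16 has diff == 4, so it is passed over until
             * its skip_count exceeds diff*diff == 16; REALTIME-class threads
             * are never selected by aging and run only when they are the
             * highest runnable priority. */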
1224 if (LIKELY(priority <= max) ||
1225 IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
1226 (priority > PRIORITY_REALTIME &&
1227 (diff = priority - max,
1228 ++thread->skip_count > diff*diff)))
1230 cores[core].running = thread;
1231 break;
1234 thread = thread->l.next;
1236 #else
1237 /* Without priority use a simple FCFS algorithm */
1238 if (block == NULL)
1240 /* Not switching on a block, select next thread */
1241 thread = thread->l.next;
1242 cores[core].running = thread;
1244 #endif /* HAVE_PRIORITY_SCHEDULING */
1246 RTR_UNLOCK(core);
1247 enable_irq();
1248 break;
1252 /* And finally give control to the next thread. */
1253 load_context(&thread->context);
1255 #ifdef RB_PROFILE
1256 profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
1257 #endif
1261 /*---------------------------------------------------------------------------
1262 * Sleeps a thread for at least a specified number of ticks with zero being
1263 * a wait until the next tick.
1265 * INTERNAL: Intended for use by kernel and not for programs.
1266 *---------------------------------------------------------------------------
1268 void sleep_thread(int ticks)
1270 struct thread_entry *current = cores[CURRENT_CORE].running;
1272 LOCK_THREAD(current);
1274 /* Set our timeout, remove from run list and join timeout list. */
1275 current->tmo_tick = current_tick + ticks + 1;
1276 block_thread_on_l(current, STATE_SLEEPING);
1278 UNLOCK_THREAD(current);
1281 /*---------------------------------------------------------------------------
1282 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1284 * INTERNAL: Intended for use by kernel objects and not for programs.
1285 *---------------------------------------------------------------------------
1287 void block_thread(struct thread_entry *current)
1289 /* Set the state to blocked and take us off of the run queue until we
1290 * are explicitly woken */
1291 LOCK_THREAD(current);
1293 /* Set the list for explicit wakeup */
1294 block_thread_on_l(current, STATE_BLOCKED);
1296 #ifdef HAVE_PRIORITY_SCHEDULING
1297 if (current->blocker != NULL)
1299 /* Object supports PIP */
1300 current = blocker_inherit_priority(current);
1302 #endif
1304 UNLOCK_THREAD(current);
1307 /*---------------------------------------------------------------------------
1308 * Block a thread on a blocking queue for a specified time interval or until
1309 * explicitly woken - whichever happens first.
1311 * INTERNAL: Intended for use by kernel objects and not for programs.
1312 *---------------------------------------------------------------------------
1314 void block_thread_w_tmo(struct thread_entry *current, int timeout)
1316 /* Get the entry for the current running thread. */
1317 LOCK_THREAD(current);
1319 /* Set the state to blocked with the specified timeout */
1320 current->tmo_tick = current_tick + timeout;
1322 /* Set the list for explicit wakeup */
1323 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1325 #ifdef HAVE_PRIORITY_SCHEDULING
1326 if (current->blocker != NULL)
1328 /* Object supports PIP */
1329 current = blocker_inherit_priority(current);
1331 #endif
1333 UNLOCK_THREAD(current);
1336 /*---------------------------------------------------------------------------
1337  * Explicitly wake up a thread on a blocking queue. Only affects threads in
1338 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
1340 * This code should be considered a critical section by the caller meaning
1341 * that the object's corelock should be held.
1343 * INTERNAL: Intended for use by kernel objects and not for programs.
1344 *---------------------------------------------------------------------------
1346 unsigned int wakeup_thread(struct thread_entry **list)
1348 struct thread_entry *thread = *list;
1349 unsigned int result = THREAD_NONE;
1351 /* Check if there is a blocked thread at all. */
1352 if (thread == NULL)
1353 return result;
1355 LOCK_THREAD(thread);
1357 /* Determine thread's current state. */
1358 switch (thread->state)
1360 case STATE_BLOCKED:
1361 case STATE_BLOCKED_W_TMO:
1362 remove_from_list_l(list, thread);
1364 result = THREAD_OK;
1366 #ifdef HAVE_PRIORITY_SCHEDULING
1367 struct thread_entry *current;
1368 struct blocker *bl = thread->blocker;
1370 if (bl == NULL)
1372 /* No inheritance - just boost the thread by aging */
1373 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1374 thread->skip_count = thread->priority;
1375 current = cores[CURRENT_CORE].running;
1377 else
1379 /* Call the specified unblocking PIP */
1380 current = bl->wakeup_protocol(thread);
1383 if (current != NULL &&
1384 find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
1385 < current->priority)
1387 /* There is a thread ready to run of higher or same priority on
1388 * the same core as the current one; recommend a task switch.
1389 * Knowing if this is an interrupt call would be helpful here. */
1390 result |= THREAD_SWITCH;
1392 #endif /* HAVE_PRIORITY_SCHEDULING */
1394 core_schedule_wakeup(thread);
1395 break;
1397 /* Nothing to do. State is not blocked. */
1398 #if THREAD_EXTRA_CHECKS
1399 default:
1400 THREAD_PANICF("wakeup_thread->block invalid", thread);
1401 case STATE_RUNNING:
1402 case STATE_KILLED:
1403 break;
1404 #endif
1407 UNLOCK_THREAD(thread);
1408 return result;
1411 /*---------------------------------------------------------------------------
1412  * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
1413  * from each operation, or THREAD_NONE if nothing was awakened. The object owning
1414 * the queue must be locked first.
1416 * INTERNAL: Intended for use by kernel objects and not for programs.
1417 *---------------------------------------------------------------------------
1419 unsigned int thread_queue_wake(struct thread_entry **list)
1421 unsigned result = THREAD_NONE;
1423 for (;;)
1425 unsigned int rc = wakeup_thread(list);
1427 if (rc == THREAD_NONE)
1428 break; /* No more threads */
1430 result |= rc;
1433 return result;
1436 /*---------------------------------------------------------------------------
1437 * Assign the thread slot a new ID. Version is 1-255.
1438 *---------------------------------------------------------------------------
1440 static void new_thread_id(unsigned int slot_num,
1441 struct thread_entry *thread)
1443 unsigned int version =
1444 (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
1445 & THREAD_ID_VERSION_MASK;
1447 /* If wrapped to 0, make it 1 */
1448 if (version == 0)
1449 version = 1u << THREAD_ID_VERSION_SHIFT;
1451 thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
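/* Illustrative layout: a thread in slot 3 whose previous id carried version 7
 * gets id == (8 << THREAD_ID_VERSION_SHIFT) | 3, so stale ids that still
 * refer to an earlier occupant of slot 3 fail the "thread->id == thread_id"
 * checks used elsewhere in this file. */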
1454 /*---------------------------------------------------------------------------
1455  * Find an empty thread slot, or NULL if none found. The slot returned
1456 * will be locked on multicore.
1457 *---------------------------------------------------------------------------
1459 static struct thread_entry * find_empty_thread_slot(void)
1461 /* Any slot could be on an interrupt-accessible list */
1462 IF_COP( int oldlevel = disable_irq_save(); )
1463 struct thread_entry *thread = NULL;
1464 int n;
1466 for (n = 0; n < MAXTHREADS; n++)
1468 /* Obtain current slot state - lock it on multicore */
1469 struct thread_entry *t = &threads[n];
1470 LOCK_THREAD(t);
1472 if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
1474 /* Slot is empty - leave it locked and caller will unlock */
1475 thread = t;
1476 break;
1479 /* Finished examining slot - no longer busy - unlock on multicore */
1480 UNLOCK_THREAD(t);
1483     IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
1484                                         not accessible to them yet */
1485 return thread;
1488 /*---------------------------------------------------------------------------
1489 * Return the thread_entry pointer for a thread_id. Return the current
1490 * thread if the ID is 0 (alias for current).
1491 *---------------------------------------------------------------------------
1493 struct thread_entry * thread_id_entry(unsigned int thread_id)
1495 return (thread_id == THREAD_ID_CURRENT) ?
1496 cores[CURRENT_CORE].running :
1497 &threads[thread_id & THREAD_ID_SLOT_MASK];
1500 /*---------------------------------------------------------------------------
1501 * Place the current core in idle mode - woken up on interrupt or wake
1502 * request from another core.
1503 *---------------------------------------------------------------------------
1505 void core_idle(void)
1507 IF_COP( const unsigned int core = CURRENT_CORE; )
1508 disable_irq();
1509 core_sleep(IF_COP(core));
1512 /*---------------------------------------------------------------------------
1513 * Create a thread. If using a dual core architecture, specify which core to
1514 * start the thread on.
1516  * Return ID if context area could be allocated, else 0.
1517 *---------------------------------------------------------------------------
1519 unsigned int create_thread(void (*function)(void),
1520 void* stack, size_t stack_size,
1521 unsigned flags, const char *name
1522 IF_PRIO(, int priority)
1523 IF_COP(, unsigned int core))
1525 unsigned int i;
1526 unsigned int stack_words;
1527 uintptr_t stackptr, stackend;
1528 struct thread_entry *thread;
1529 unsigned state;
1530 int oldlevel;
1532 thread = find_empty_thread_slot();
1533 if (thread == NULL)
1535 return 0;
1538 oldlevel = disable_irq_save();
1540 /* Munge the stack to make it easy to spot stack overflows */
1541 stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
1542 stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
1543 stack_size = stackend - stackptr;
1544 stack_words = stack_size / sizeof (uintptr_t);
1546 for (i = 0; i < stack_words; i++)
1548 ((uintptr_t *)stackptr)[i] = DEADBEEF;
1551 /* Store interesting information */
1552 thread->name = name;
1553 thread->stack = (uintptr_t *)stackptr;
1554 thread->stack_size = stack_size;
1555 thread->queue = NULL;
1556 #ifdef HAVE_WAKEUP_EXT_CB
1557 thread->wakeup_ext_cb = NULL;
1558 #endif
1559 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1560 thread->cpu_boost = 0;
1561 #endif
1562 #ifdef HAVE_PRIORITY_SCHEDULING
1563 memset(&thread->pdist, 0, sizeof(thread->pdist));
1564 thread->blocker = NULL;
1565 thread->base_priority = priority;
1566 thread->priority = priority;
1567 thread->skip_count = priority;
1568 prio_add_entry(&thread->pdist, priority);
1569 #endif
1571 #ifdef HAVE_IO_PRIORITY
1572 /* Default to high (foreground) priority */
1573 thread->io_priority = IO_PRIORITY_IMMEDIATE;
1574 #endif
1576 #if NUM_CORES > 1
1577 thread->core = core;
1579 /* Writeback stack munging or anything else before starting */
1580 if (core != CURRENT_CORE)
1582 cpucache_flush();
1584 #endif
1586 /* Thread is not on any timeout list but be a bit paranoid */
1587 thread->tmo.prev = NULL;
1589 state = (flags & CREATE_THREAD_FROZEN) ?
1590 STATE_FROZEN : STATE_RUNNING;
1592 thread->context.sp = (typeof (thread->context.sp))stackend;
1594 /* Load the thread's context structure with needed startup information */
1595 THREAD_STARTUP_INIT(core, thread, function);
1597 thread->state = state;
1598 i = thread->id; /* Snapshot while locked */
1600 if (state == STATE_RUNNING)
1601 core_schedule_wakeup(thread);
1603 UNLOCK_THREAD(thread);
1604 restore_irq(oldlevel);
1606 return i;
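/* Typical call (illustrative only - the stack, function and name here are
 * made up, and flags/priority/core depend on the build):
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *     unsigned int id = create_thread(demo_thread, demo_stack,
 *                                     sizeof(demo_stack), 0, "demo"
 *                                     IF_PRIO(, PRIORITY_BACKGROUND)
 *                                     IF_COP(, CPU));
 *     if (id == 0)
 *         panicf("could not create demo thread");   // no free slot
 */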
1609 #ifdef HAVE_SCHEDULER_BOOSTCTRL
1610 /*---------------------------------------------------------------------------
1611 * Change the boost state of a thread boosting or unboosting the CPU
1612 * as required.
1613 *---------------------------------------------------------------------------
1615 static inline void boost_thread(struct thread_entry *thread, bool boost)
1617 if ((thread->cpu_boost != 0) != boost)
1619 thread->cpu_boost = boost;
1620 cpu_boost(boost);
1624 void trigger_cpu_boost(void)
1626 struct thread_entry *current = cores[CURRENT_CORE].running;
1627 boost_thread(current, true);
1630 void cancel_cpu_boost(void)
1632 struct thread_entry *current = cores[CURRENT_CORE].running;
1633 boost_thread(current, false);
1635 #endif /* HAVE_SCHEDULER_BOOSTCTRL */
1637 /*---------------------------------------------------------------------------
1638 * Block the current thread until another thread terminates. A thread may
1639  * wait on itself to terminate, which prevents it from running again and it
1640 * will need to be killed externally.
1641 * Parameter is the ID as returned from create_thread().
1642 *---------------------------------------------------------------------------
1644 void thread_wait(unsigned int thread_id)
1646 struct thread_entry *current = cores[CURRENT_CORE].running;
1647 struct thread_entry *thread = thread_id_entry(thread_id);
1649 /* Lock thread-as-waitable-object lock */
1650 corelock_lock(&thread->waiter_cl);
1652 /* Be sure it hasn't been killed yet */
1653 if (thread_id == THREAD_ID_CURRENT ||
1654 (thread->id == thread_id && thread->state != STATE_KILLED))
1656 IF_COP( current->obj_cl = &thread->waiter_cl; )
1657 current->bqp = &thread->queue;
1659 disable_irq();
1660 block_thread(current);
1662 corelock_unlock(&thread->waiter_cl);
1664 switch_thread();
1665 return;
1668 corelock_unlock(&thread->waiter_cl);
1671 /*---------------------------------------------------------------------------
1672 * Exit the current thread. The Right Way to Do Things (TM).
1673 *---------------------------------------------------------------------------
1675 /* This is done to foil optimizations that may require the current stack,
1676 * such as optimizing subexpressions that put variables on the stack that
1677 * get used after switching stacks. */
1678 #if NUM_CORES > 1
1679 /* Called by ASM stub */
1680 static void thread_final_exit_do(struct thread_entry *current)
1681 #else
1682 /* No special procedure is required before calling */
1683 static inline void thread_final_exit(struct thread_entry *current)
1684 #endif
1686 /* At this point, this thread isn't using resources allocated for
1687 * execution except the slot itself. */
1689 /* Signal this thread */
1690 thread_queue_wake(&current->queue);
1691 corelock_unlock(&current->waiter_cl);
1692 switch_thread();
1693 /* This should never and must never be reached - if it is, the
1694 * state is corrupted */
1695 THREAD_PANICF("thread_exit->K:*R", current);
1696 while (1);
1699 void thread_exit(void)
1701 register struct thread_entry * current = cores[CURRENT_CORE].running;
1703 /* Cancel CPU boost if any */
1704 cancel_cpu_boost();
1706 disable_irq();
1708 corelock_lock(&current->waiter_cl);
1709 LOCK_THREAD(current);
1711 #if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
1712 if (current->name == THREAD_DESTRUCT)
1714 /* Thread being killed - become a waiter */
1715 unsigned int id = current->id;
1716 UNLOCK_THREAD(current);
1717 corelock_unlock(&current->waiter_cl);
1718 thread_wait(id);
1719 THREAD_PANICF("thread_exit->WK:*R", current);
1721 #endif
1723 #ifdef HAVE_PRIORITY_SCHEDULING
1724 check_for_obj_waiters("thread_exit", current);
1725 #endif
1727 if (current->tmo.prev != NULL)
1729 /* Cancel pending timeout list removal */
1730 remove_from_list_tmo(current);
1733 /* Switch tasks and never return */
1734 block_thread_on_l(current, STATE_KILLED);
1736 /* Slot must be unusable until thread is really gone */
1737 UNLOCK_THREAD_AT_TASK_SWITCH(current);
1739 /* Update ID for this slot */
1740 new_thread_id(current->id, current);
1741 current->name = NULL;
1743 /* Do final cleanup and remove the thread */
1744 thread_final_exit(current);
1747 #ifdef ALLOW_REMOVE_THREAD
1748 /*---------------------------------------------------------------------------
1749 * Remove a thread from the scheduler. Not The Right Way to Do Things in
1750 * normal programs.
1752 * Parameter is the ID as returned from create_thread().
1754 * Use with care on threads that are not under careful control as this may
1755 * leave various objects in an undefined state.
1756 *---------------------------------------------------------------------------
1758 void remove_thread(unsigned int thread_id)
1760 #if NUM_CORES > 1
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
    struct corelock *ocl = NULL;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    struct thread_entry *current = cores[core].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    unsigned state;
    int oldlevel;

    if (thread == current)
        thread_exit(); /* Current thread - do normal exit */

    oldlevel = disable_irq_save();

    corelock_lock(&thread->waiter_cl);
    LOCK_THREAD(thread);

    state = thread->state;

    if (thread->id != thread_id || state == STATE_KILLED)
        goto thread_killed;

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(thread);
        corelock_unlock(&thread->waiter_cl);
        restore_irq(oldlevel);
        thread_wait(thread_id);
        return;
    }

    thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("remove_thread", thread);
#endif

    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur which means other
         * threads have to be guided into becoming thread waiters if they
         * attempt to remove it. */
        unsigned int new_core = thread->core;

        corelock_unlock(&thread->waiter_cl);

        UNLOCK_THREAD(thread);
        restore_irq(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = disable_irq_save();

        corelock_lock(&thread->waiter_cl);
        LOCK_THREAD(thread);

        state = thread->state;
        core = new_core;
        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
         * run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Cancel CPU boost if any */
    boost_thread(thread, false);
#endif

IF_COP( retry_state: )

    switch (state)
    {
    case STATE_RUNNING:
        RTR_LOCK(core);
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        rtr_subtract_entry(core, thread->priority);
        RTR_UNLOCK(core);
        break;
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#if NUM_CORES > 1
        if (&thread->waiter_cl != thread->obj_cl)
        {
            ocl = thread->obj_cl;

            if (UNLIKELY(corelock_try_lock(ocl) == 0))
            {
                UNLOCK_THREAD(thread);
                corelock_lock(ocl);
                LOCK_THREAD(thread);

                if (UNLIKELY(thread->state != state))
                {
                    /* Something woke the thread */
                    state = thread->state;
                    corelock_unlock(ocl);
                    goto retry_state;
                }
            }
        }
#endif
        remove_from_list_l(thread->bqp, thread);

#ifdef HAVE_WAKEUP_EXT_CB
        if (thread->wakeup_ext_cb != NULL)
            thread->wakeup_ext_cb(thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
        if (thread->blocker != NULL)
        {
            /* Remove thread's priority influence from its chain */
            wakeup_priority_protocol_release(thread);
        }
#endif

#if NUM_CORES > 1
        if (ocl != NULL)
            corelock_unlock(ocl);
#endif
        break;
    /* Otherwise thread is frozen and hasn't run yet */
    }

    new_thread_id(thread_id, thread);
    thread->state = STATE_KILLED;

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake(&thread->queue);

    thread->name = NULL;

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock and reenable interrupts */
    corelock_unlock(&thread->waiter_cl);
    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

#if NUM_CORES > 1
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           native core */
        switch_core(old_core);
    }
#endif
}
#endif /* ALLOW_REMOVE_THREAD */
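/* Usage sketch (illustrative only): a supervising thread can tear down a
 * worker by ID. The worker_id variable is hypothetical here and would come
 * from the call that created the worker; see thread.h for the exact
 * remove_thread() declaration on a given build. */
#if 0
/* worker_id: hypothetical ID saved when the worker was created */
remove_thread(worker_id); /* kills the worker, or becomes a waiter if another
                             core is already destructing the same slot */
#endif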
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative base priority for the core it runs on. Any
 * needed inheritance changes also may happen.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(unsigned int thread_id, int priority)
{
    int old_base_priority = -1;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* A little safety measure */
    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
        return -1;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* Make sure it's not killed */
    if (thread_id == THREAD_ID_CURRENT ||
        (thread->id == thread_id && thread->state != STATE_KILLED))
    {
        int old_priority = thread->priority;

        old_base_priority = thread->base_priority;
        thread->base_priority = priority;

        prio_move_entry(&thread->pdist, old_base_priority, priority);
        priority = find_first_set_bit(thread->pdist.mask);

        if (old_priority == priority)
        {
            /* No priority change - do nothing */
        }
        else if (thread->state == STATE_RUNNING)
        {
            /* This thread is running - change location on the run
             * queue. No transitive inheritance needed. */
            set_running_thread_priority(thread, priority);
        }
        else
        {
            thread->priority = priority;

            if (thread->blocker != NULL)
            {
                /* Bubble new priority down the chain */
                struct blocker *bl = thread->blocker;   /* Blocker struct */
                struct thread_entry *bl_t = bl->thread; /* Blocking thread */
                struct thread_entry * const tstart = thread; /* Initial thread */
                const int highest = MIN(priority, old_priority); /* Higher of new or old */

                for (;;)
                {
                    struct thread_entry *next; /* Next thread to check */
                    int bl_pr;    /* Highest blocked thread */
                    int queue_pr; /* New highest blocked thread */
#if NUM_CORES > 1
                    /* Owner can change but thread cannot be dislodged - thread
                     * may not be the first in the queue which allows other
                     * threads ahead in the list to be given ownership during the
                     * operation. If thread is next then the waker will have to
                     * wait for us and the owner of the object will remain fixed.
                     * If we successfully grab the owner -- which at some point
                     * is guaranteed -- then the queue remains fixed until we
                     * pass by. */
                    for (;;)
                    {
                        LOCK_THREAD(bl_t);

                        /* Double-check the owner - retry if it changed */
                        if (LIKELY(bl->thread == bl_t))
                            break;

                        UNLOCK_THREAD(bl_t);
                        bl_t = bl->thread;
                    }
#endif
                    bl_pr = bl->priority;

                    if (highest > bl_pr)
                        break; /* Object priority won't change */

                    /* This will include the thread being set */
                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);

                    if (queue_pr == bl_pr)
                        break; /* Object priority not changing */

                    /* Update thread boost for this object */
                    bl->priority = queue_pr;
                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
                    bl_pr = find_first_set_bit(bl_t->pdist.mask);

                    if (bl_t->priority == bl_pr)
                        break; /* Blocking thread priority not changing */

                    if (bl_t->state == STATE_RUNNING)
                    {
                        /* Thread not blocked - we're done */
                        set_running_thread_priority(bl_t, bl_pr);
                        break;
                    }

                    bl_t->priority = bl_pr;
                    bl = bl_t->blocker; /* Blocking thread has a blocker? */

                    if (bl == NULL)
                        break; /* End of chain */

                    next = bl->thread;

                    if (UNLIKELY(next == tstart))
                        break; /* Full-circle */

                    UNLOCK_THREAD(thread);

                    thread = bl_t;
                    bl_t = next;
                } /* for (;;) */

                UNLOCK_THREAD(bl_t);
            }
        }
    }

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return old_base_priority;
}
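/* Usage sketch (illustrative only): temporarily raise the calling thread's
 * base priority around a latency-critical section, then restore it. Only
 * names used above are assumed (THREAD_ID_CURRENT, HIGHEST_PRIORITY); a -1
 * return means the priority was out of range or the thread was killed. */
#if 0
int old = thread_set_priority(THREAD_ID_CURRENT, HIGHEST_PRIORITY);
if (old >= 0)
{
    /* ... time-critical work ... */
    thread_set_priority(THREAD_ID_CURRENT, old); /* restore previous base */
}
#endif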
/*---------------------------------------------------------------------------
 * Returns the current base priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int base_priority = thread->base_priority;

    /* Simply check without locking slot. It may or may not be valid by the
     * time the function returns anyway. If all tests pass, it is the
     * correct value for when it was valid. */
    if (thread_id != THREAD_ID_CURRENT &&
        (thread->id != thread_id || thread->state == STATE_KILLED))
        base_priority = -1;

    return base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
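/* Usage sketch (illustrative only): since the slot is read without locking,
 * treat -1 as "no longer a valid thread". some_id is a hypothetical ID. */
#if 0
int prio = thread_get_priority(some_id);
if (prio == -1)
{
    /* slot was reused or the thread was killed - don't rely on it further */
}
#endif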
#ifdef HAVE_IO_PRIORITY
/* Returns the thread's current I/O priority; the slot is read without locking. */
int thread_get_io_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    return thread->io_priority;
}

/* Sets the thread's I/O priority; the value is stored in the slot as-is. */
void thread_set_io_priority(unsigned int thread_id, int io_priority)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    thread->io_priority = io_priority;
}
#endif
/*---------------------------------------------------------------------------
 * Starts a frozen thread - similar semantics to wakeup_thread except that
 * the thread is on no scheduler or wakeup queue at all. It exists simply by
 * virtue of the slot having a state of STATE_FROZEN.
 *---------------------------------------------------------------------------
 */
void thread_thaw(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* If thread is the current one, it cannot be frozen, therefore
     * there is no need to check that. */
    if (thread->id == thread_id && thread->state == STATE_FROZEN)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);
}
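/* Usage sketch (illustrative only): a thread created in the frozen state does
 * not run until it is thawed, which lets the creator finish setting up shared
 * state first. The create_thread() parameters and the CREATE_THREAD_FROZEN
 * flag shown here are assumptions for the example - check thread.h for the
 * actual prototype on a given target. */
#if 0
unsigned int id = create_thread(worker_main, worker_stack, sizeof(worker_stack),
                                CREATE_THREAD_FROZEN, "worker"
                                IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));
/* ... publish the buffers, queues, etc. that the worker will use ... */
thread_thaw(id); /* STATE_FROZEN -> scheduled for wakeup */
#endif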
/*---------------------------------------------------------------------------
 * Return the ID of the currently executing thread.
 *---------------------------------------------------------------------------
 */
unsigned int thread_get_current(void)
{
    return cores[CURRENT_CORE].running->id;
}
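/* Usage sketch (illustrative only): a thread can fetch its own ID, e.g. to
 * hand it to code that will later thaw it, reprioritise it or wait on it. */
#if 0
unsigned int self = thread_get_current();
#endif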
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if (core == new_core)
    {
        /* No change - just return same core */
        return core;
    }

    int oldlevel = disable_irq_save();
    LOCK_THREAD(current);

    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - deactivate and let process complete */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        restore_irq(oldlevel);
        thread_wait(id);
        /* Should never be reached */
        THREAD_PANICF("switch_core->D:*R", current);
    }

    /* Get us off the running list for the current core */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, current);
    rtr_subtract_entry(core, current->priority);
    RTR_UNLOCK(core);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;

    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the list unlock to keep the other core stuck
     * until this thread is ready. */
    RTR_LOCK(new_core);

    rtr_add_entry(new_core, current->priority);
    add_to_list_l(&cores[new_core].running, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
    cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
    cores[core].block_task = current;

    UNLOCK_THREAD(current);

    /* Alert other core to activity */
    core_wake(new_core);

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

    /* Finally return the old core to caller */
    return current->retval;
}
#endif /* NUM_CORES > 1 */
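/* Usage sketch (illustrative only): hop to the other core for a section of
 * work and then return. Using COP as the target is an assumption of the
 * example (the usual name of the second core on PortalPlayer targets). */
#if 0
unsigned int prev = switch_core(COP);
/* ... work that must execute on the coprocessor ... */
switch_core(prev); /* migrate back to the core we started on */
#endif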
/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;

    if (core == CPU)
    {
        /* Initialize core locks and IDs in all slots */
        int n;
        for (n = 0; n < MAXTHREADS; n++)
        {
            thread = &threads[n];
            corelock_init(&thread->waiter_cl);
            corelock_init(&thread->slot_cl);
            thread->id = THREAD_ID_INIT(n);
        }
    }

    /* CPU will initialize first and then sleep */
    thread = find_empty_thread_slot();

    if (thread == NULL)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zeroed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }

    /* Initialize initially non-zero members of core */
    cores[core].next_tmo_check = current_tick; /* Something not in the past */

    /* Initialize initially non-zero members of slot */
    UNLOCK_THREAD(thread); /* No sync worries yet */
    thread->name = main_thread_name;
    thread->state = STATE_RUNNING;
    IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
    corelock_init(&cores[core].rtr_cl);
    thread->base_priority = PRIORITY_USER_INTERFACE;
    prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
    thread->priority = PRIORITY_USER_INTERFACE;
    rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif

    add_to_list_l(&cores[core].running, thread);

    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
        /* Wait for other processors to finish their inits since create_thread
         * isn't safe to call until the kernel inits are done. The first
         * threads created in the system must of course be created by CPU.
         * Another possible approach is to initialize all cores and slots
         * for each core by CPU, let the remainder proceed in parallel and
         * signal CPU when all are finished. */
        core_thread_init(CPU);
    }
    else
    {
        /* Initial stack is the idle stack */
        thread->stack = idle_stacks[core];
        thread->stack_size = IDLE_STACK_SIZE;
        /* After last processor completes, it should signal all others to
         * proceed or may signal the next and call thread_exit(). The last one
         * to finish will signal CPU. */
        core_thread_init(core);
        /* Other cores do not have a main thread - go idle inside switch_thread
         * until a thread can run on the core. */
        thread_exit();
#endif /* NUM_CORES */
    }
}
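/* Ordering sketch (illustrative only): threading must come up before
 * interrupts are enabled and before any create_thread() calls. The
 * surrounding calls are placeholders for whatever the target's startup
 * code actually does. */
#if 0
init_threads();   /* interrupts still disabled at this point */
/* ... remaining kernel init ... */
enable_irq();
/* create_thread() is only safe from here on */
#endif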
/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES == 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    unsigned int i;
    int usage = 0;

    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
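/* Worked example (illustrative only): with a 256-word stack where the first
 * word no longer holding the DEADBEEF fill pattern is found at index 192,
 * the reported usage is ((256 - 192) * 100) / 256 = 25%. A stack that was
 * never written at all reports 0%. */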
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 * overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    return stack_usage(thread->stack, thread->stack_size);
}
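/* Usage sketch (illustrative only): a debug screen could walk the slot array
 * and log each thread's high-water mark. Skipping unused slots by checking
 * the name pointer is an assumption made for the example. */
#if 0
int n;
for (n = 0; n < MAXTHREADS; n++)
{
    if (threads[n].name != NULL)
        printf("%2d: %3d%%\n", n, thread_stack_usage(&threads[n]));
}
#endif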
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif
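/* Usage sketch (illustrative only): report every core's idle stack watermark. */
#if 0
unsigned int c;
for (c = 0; c < NUM_CORES; c++)
    printf("idle stack %u: %d%%\n", c, idle_stack_usage(c));
#endif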
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";
        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX";
        }
        snprintf(buffer, size, fmt, name);