/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/****************************************************************************
 * See notes below on implementing processor-specific portions!             *
 ***************************************************************************/

/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif

/****************************************************************************
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever waiting for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 ****************************************************************************/
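
/* Illustrative sketch only (not part of the build): a hypothetical wakeup
 * path taking its locks in the 1) -> 2) -> 3) order described above. The
 * function name and the object_cl parameter are assumptions made purely for
 * illustration; the real paths are wakeup_thread() and friends below. */
#if 0
static void hypothetical_object_wakeup(struct corelock *object_cl,
                                       struct thread_entry *thread)
{
    int oldlevel = disable_irq_save();  /* 1) IRQ                 */
    corelock_lock(object_cl);           /* 2) kernel object       */
    LOCK_THREAD(thread);                /* 3) thread slot         */

    /* ...state change and move onto the run list would happen here... */

    UNLOCK_THREAD(thread);
    corelock_unlock(object_cl);
    restore_irq(oldlevel);
}
#endif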

/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others;
 * usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
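
/* Illustrative sketch only (not part of the build): the general shape of a
 * single-core core_sleep() following the numbered steps above. Real ports
 * implement this per-CPU, often in assembly, with careful instruction
 * ordering; cpu_sleep_until_interrupt() is an assumed stand-in for the
 * CPU-specific sleep instruction. */
#if 0
static inline void core_sleep(void)
{
    cpu_sleep_until_interrupt(); /* 3) sleep the core; woken by any IRQ   */
    enable_irq();                /* 4) reenable interrupts after the wake */
}                                /* 5) fall back into the scheduler       */
#endif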

/* Cast to the machine pointer size, whose size could be < 4 or > 32. */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
static struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];
#else
extern uintptr_t *stackbegin;
extern uintptr_t *stackend;
#endif

static inline void core_sleep(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));

void check_tmo_threads(void)
        __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
        __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
        __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
        __attribute__((noinline));

#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
        __attribute__((always_inline));
#endif

static void thread_stkov(struct thread_entry *thread)
        __attribute__((noinline));

static inline void store_context(void* addr)
        __attribute__((always_inline));

static inline void load_context(const void* addr)
        __attribute__((always_inline));

static void thread_final_exit_do(struct thread_entry *current)
    __attribute__((noinline, noreturn, used));

static inline void thread_final_exit(struct thread_entry *current)
    __attribute__((always_inline, noreturn));

void switch_thread(void)
    __attribute__((noinline));

/****************************************************************************
 * Processor-specific section - include necessary core support
 */

#if defined(ANDROID)
#include "thread-android-arm.c"
#elif defined(CPU_ARM)
#include "thread-arm.c"
#if defined(CPU_PP)
#include "thread-pp.c"
#endif /* CPU_PP */
#elif defined(CPU_COLDFIRE)
#include "thread-coldfire.c"
#elif CONFIG_CPU == SH7034
#include "thread-sh.c"
#elif defined(CPU_MIPS) && CPU_MIPS == 32
#include "thread-mips32.c"
#else
/* Wouldn't compile anyway */
#error Processor not implemented.
#endif /* CONFIG_CPU == */

#ifndef IF_NO_SKIP_YIELD
#define IF_NO_SKIP_YIELD(...)
#endif

/*
 * End Processor-specific section
 ***************************************************************************/

#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s %d" IF_COP(" (%d)"), msg, name,
            thread->stack_size IF_COP(, core));
}

static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}

#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */

#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else /* NUM_CORES == 1 */
#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })
#endif /* NUM_CORES */

#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })

#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))

#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))

#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif

/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *     +------------------------------+
 *     |                              |
 *     +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *     +->+---+->+---+->+---+->+---+--+
 *     |                              |
 *     +------------------------------+
 *---------------------------------------------------------------------------
 */

/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}

/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == next)
    {
        /* The only item on the list */
        *list = NULL;
        return;
    }

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    next->l.prev = prev;
    prev->l.next = next;
}

/*---------------------------------------------------------------------------
 * Timeout list structure - circular reverse (to make "remove item" O(1)),
 * NULL-terminated forward (to ease the far more common forward traversal):
 *     +------------------------------+
 *     |                              |
 *     +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *        +---+->+---+->+---+->+---+-X
 *---------------------------------------------------------------------------
 */

/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
    THREAD_ASSERT(thread->tmo.prev == NULL,
                  "add_to_list_tmo->already listed", thread);

    thread->tmo.next = NULL;

    if (tmo == NULL)
    {
        /* Insert into unoccupied list */
        thread->tmo.prev = thread;
        cores[IF_COP_CORE(thread->core)].timeout = thread;
        return;
    }

    /* Insert last */
    thread->tmo.prev = tmo->tmo.prev;
    tmo->tmo.prev->tmo.next = thread;
    tmo->tmo.prev = thread;
}

/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is cancelled.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
    struct thread_entry *prev = thread->tmo.prev;
    struct thread_entry *next = thread->tmo.next;

    THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);

    if (next != NULL)
        next->tmo.prev = prev;

    if (thread == *list)
    {
        /* List becomes next item and empty if next == NULL */
        *list = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
    else
    {
        if (next == NULL)
            (*list)->tmo.prev = prev;
        prev->tmo.next = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
}

#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Priority distribution structure (one category for each possible priority):
 *
 *       +----+----+----+ ... +-----+
 * hist: | F0 | F1 | F2 |     | F31 |
 *       +----+----+----+ ... +-----+
 * mask: | b0 | b1 | b2 |     | b31 |
 *       +----+----+----+ ... +-----+
 *
 * F = count of threads at priority category n (frequency)
 * b = bitmask of non-zero priority categories (occupancy)
 *
 *        / if F[n] != 0 : 1
 * b[n] = |
 *        \ else         : 0
 *
 *---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
 *
 * Mn = mutex n, Tn = thread n
 *
 * A lower priority thread inherits the priority of the highest priority
 * thread blocked waiting for it to complete an action (such as release a
 * mutex or respond to a message via queue_send):
 *
 * 1) T2->M1->T1
 *
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1, then T1 inherits the priority of T2.
 *
 * 2) T3->M1->T1, T2->M1->T1
 *
 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
 * T1 inherits the higher of T2 and T3.
 *
 * 3) T3->M2->T2->M1->T1
 *
 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
 * then T1 inherits the priority of T3 through T2.
 *
 * Blocking chains can grow arbitrarily complex (though it's best that they
 * not form at all very often :) and build up from these units.
 *---------------------------------------------------------------------------
 */
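
/* Illustrative sketch only (not part of the build): how the distribution
 * above is expected to behave. The priority numbers are assumptions chosen
 * purely for the example. */
#if 0
static void priority_distribution_example(void)
{
    struct priority_distribution pd;
    memset(&pd, 0, sizeof (pd));

    prio_add_entry(&pd, 2);       /* hist[2] = 1, bit 2 set        */
    prio_add_entry(&pd, 2);       /* hist[2] = 2, bit 2 still set  */
    prio_add_entry(&pd, 5);       /* hist[5] = 1, bit 5 set        */

    /* Highest (numerically lowest) occupied priority is 2 */
    int highest = find_first_set_bit(pd.mask);

    prio_subtract_entry(&pd, 2);  /* hist[2] = 1, bit 2 remains    */
    prio_subtract_entry(&pd, 2);  /* hist[2] = 0, bit 2 cleared    */
    (void)highest;
}
#endif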

/*---------------------------------------------------------------------------
 * Increment frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_add_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;
    /* Enough size/instruction count difference for ARM makes it worth it to
     * use different code (192 bytes for ARM). Only thing better is ASM. */
#ifdef CPU_ARM
    count = pd->hist[priority];
    if (++count == 1)
        pd->mask |= 1 << priority;
    pd->hist[priority] = count;
#else /* This one's better for Coldfire */
    if ((count = ++pd->hist[priority]) == 1)
        pd->mask |= 1 << priority;
#endif

    return count;
}

/*---------------------------------------------------------------------------
 * Decrement frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_subtract_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;

#ifdef CPU_ARM
    count = pd->hist[priority];
    if (--count == 0)
        pd->mask &= ~(1 << priority);
    pd->hist[priority] = count;
#else
    if ((count = --pd->hist[priority]) == 0)
        pd->mask &= ~(1 << priority);
#endif

    return count;
}

/*---------------------------------------------------------------------------
 * Remove from one category and add to another
 *---------------------------------------------------------------------------
 */
static inline void prio_move_entry(
    struct priority_distribution *pd, int from, int to)
{
    uint32_t mask = pd->mask;

#ifdef CPU_ARM
    unsigned int count;

    count = pd->hist[from];
    if (--count == 0)
        mask &= ~(1 << from);
    pd->hist[from] = count;

    count = pd->hist[to];
    if (++count == 1)
        mask |= 1 << to;
    pd->hist[to] = count;
#else
    if (--pd->hist[from] == 0)
        mask &= ~(1 << from);

    if (++pd->hist[to] == 1)
        mask |= 1 << to;
#endif

    pd->mask = mask;
}

/*---------------------------------------------------------------------------
 * Change the priority and rtr entry for a running thread
 *---------------------------------------------------------------------------
 */
static inline void set_running_thread_priority(
    struct thread_entry *thread, int priority)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    RTR_LOCK(core);
    rtr_move_entry(core, thread->priority, priority);
    thread->priority = priority;
    RTR_UNLOCK(core);
}

/*---------------------------------------------------------------------------
 * Finds the highest priority thread in a list of threads. If the list is
 * empty, PRIORITY_IDLE is returned.
 *
 * It is possible to use the struct priority_distribution within an object
 * instead of scanning the remaining threads in the list but as a compromise,
 * the resulting per-object memory overhead is saved at a slight speed
 * penalty under high contention.
 *---------------------------------------------------------------------------
 */
static int find_highest_priority_in_list_l(
    struct thread_entry * const thread)
{
    if (LIKELY(thread != NULL))
    {
        /* Go through the list until ending up at the initial thread */
        int highest_priority = thread->priority;
        struct thread_entry *curr = thread;

        do
        {
            int priority = curr->priority;

            if (priority < highest_priority)
                highest_priority = priority;

            curr = curr->l.next;
        }
        while (curr != thread);

        return highest_priority;
    }

    return PRIORITY_IDLE;
}

/*---------------------------------------------------------------------------
 * Register priority with blocking system and bubble it down the chain if
 * any until we reach the end or something is already equal or higher.
 *
 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
 * targets but that same action also guarantees a circular block anyway and
 * those are prevented, right? :-)
 *---------------------------------------------------------------------------
 */
static struct thread_entry *
    blocker_inherit_priority(struct thread_entry *current)
{
    const int priority = current->priority;
    struct blocker *bl = current->blocker;
    struct thread_entry * const tstart = current;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since the object protection is held */
    LOCK_THREAD(bl_t);

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority >= bl_pr)
            break; /* Object priority already high enough */

        bl->priority = priority;

        /* Add this one */
        prio_add_entry(&bl_t->pdist, priority);

        if (bl_pr < PRIORITY_IDLE)
        {
            /* Not first waiter - subtract old one */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
        }

        if (priority >= bl_t->priority)
            break; /* Thread priority high enough */

        if (bl_t->state == STATE_RUNNING)
        {
            /* Blocking thread is a running thread therefore there are no
             * further blockers. Change the "run queue" on which it
             * resides. */
            set_running_thread_priority(bl_t, priority);
            break;
        }

        bl_t->priority = priority;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(current);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        current = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    return current;
}

/*---------------------------------------------------------------------------
 * Readjust priorities when waking a thread blocked waiting for another
 * in essence "releasing" the thread's effect on the object owner. Can be
 * performed from any context.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread)
{
    const int priority = thread->priority;
    struct blocker *bl = thread->blocker;
    struct thread_entry * const tstart = thread;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since object will be locked */
    LOCK_THREAD(bl_t);

    thread->blocker = NULL; /* Thread not blocked */

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority > bl_pr)
            break; /* Object priority higher */

        next = *thread->bqp;

        if (next == NULL)
        {
            /* No more threads in queue */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
            bl->priority = PRIORITY_IDLE;
        }
        else
        {
            /* Check list for highest remaining priority */
            int queue_pr = find_highest_priority_in_list_l(next);

            if (queue_pr == bl_pr)
                break; /* Object priority not changing */

            /* Change queue priority */
            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
            bl->priority = queue_pr;
        }

        if (bl_pr > bl_t->priority)
            break; /* thread priority is higher */

        bl_pr = find_first_set_bit(bl_t->pdist.mask);

        if (bl_pr == bl_t->priority)
            break; /* Thread priority not changing */

        if (bl_t->state == STATE_RUNNING)
        {
            /* No further blockers */
            set_running_thread_priority(bl_t, bl_pr);
            break;
        }

        bl_t->priority = bl_pr;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(thread);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        thread = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    if (UNLIKELY(thread != tstart))
    {
        /* Relock original if it changed */
        LOCK_THREAD(tstart);
    }

    return cores[CURRENT_CORE].running;
}

/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
 * blocking chains may only unblock from the very end.
 *
 * Only the owning thread itself may call this and so the assumption that
 * it is the running thread is made.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner */
    struct blocker *bl = thread->blocker;
    struct thread_entry *bl_t = bl->thread;
    struct thread_entry *next;
    int bl_pr;

    THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
                  "UPPT->wrong thread", cores[CURRENT_CORE].running);

    LOCK_THREAD(bl_t);

    bl_pr = bl->priority;

    /* Remove the object's boost from the owning thread */
    if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
        bl_pr <= bl_t->priority)
    {
        /* No more threads at this priority are waiting and the old level is
         * at least the thread level */
        int priority = find_first_set_bit(bl_t->pdist.mask);

        if (priority != bl_t->priority)
        {
            /* Adjust this thread's priority */
            set_running_thread_priority(bl_t, priority);
        }
    }

    next = *thread->bqp;

    if (LIKELY(next == NULL))
    {
        /* Expected shortcut - no more waiters */
        bl_pr = PRIORITY_IDLE;
    }
    else
    {
        if (thread->priority <= bl_pr)
        {
            /* Need to scan threads remaining in queue */
            bl_pr = find_highest_priority_in_list_l(next);
        }

        if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
            bl_pr < thread->priority)
        {
            /* Thread priority must be raised */
            thread->priority = bl_pr;
        }
    }

    bl->thread = thread;    /* This thread pwns */
    bl->priority = bl_pr;   /* Save highest blocked priority */
    thread->blocker = NULL; /* Thread not blocked */

    UNLOCK_THREAD(bl_t);

    return bl_t;
}

/*---------------------------------------------------------------------------
 * No threads must be blocked waiting for this thread except for it to exit.
 * The alternative is more elaborate cleanup and object registration code.
 * Check this for risk of silent data corruption when objects with
 * inheritable blocking are abandoned by the owner - not precise but may
 * catch something.
 *---------------------------------------------------------------------------
 */
static void __attribute__((noinline)) check_for_obj_waiters(
    const char *function, struct thread_entry *thread)
{
    /* Only one bit in the mask should be set with a frequency of 1 which
     * represents the thread's own base priority */
    uint32_t mask = thread->pdist.mask;
    if ((mask & (mask - 1)) != 0 ||
        thread->pdist.hist[find_first_set_bit(mask)] > 1)
    {
        unsigned char name[32];
        thread_get_name(name, 32, thread);
        panicf("%s->%s with obj. waiters", function, name);
    }
}
#endif /* HAVE_PRIORITY_SCHEDULING */

/*---------------------------------------------------------------------------
 * Move a thread back to a running state on its core.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);

    RTR_LOCK(core);

    thread->state = STATE_RUNNING;

    add_to_list_l(&cores[core].running, thread);
    rtr_add_entry(core, thread->priority);

    RTR_UNLOCK(core);

#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}

/*---------------------------------------------------------------------------
 * Check the core's timeout list when at least one thread is due to wake.
 * Filtering for the condition is done before making the call. Resets the
 * tick when the next check will occur.
 *---------------------------------------------------------------------------
 */
void check_tmo_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *next = cores[core].timeout;

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */

    /* Break the loop once we have walked through the list of all
     * sleeping processes or have removed them all. */
    while (next != NULL)
    {
        /* Check sleeping threads. Allow interrupts between checks. */
        enable_irq();

        struct thread_entry *curr = next;

        next = curr->tmo.next;

        /* Lock thread slot against explicit wakeup */
        disable_irq();
        LOCK_THREAD(curr);

        unsigned state = curr->state;

        if (state < TIMEOUT_STATE_FIRST)
        {
            /* Cleanup threads no longer on a timeout but still on the
             * list. */
            remove_from_list_tmo(curr);
        }
        else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
        {
            /* Timeout still pending - this will be the usual case */
            if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
            {
                /* Earliest timeout found so far - move the next check up
                   to its time */
                next_tmo_check = curr->tmo_tick;
            }
        }
        else
        {
            /* Sleep timeout has been reached so bring the thread back to
             * life again. */
            if (state == STATE_BLOCKED_W_TMO)
            {
#if NUM_CORES > 1
                /* Lock the waiting thread's kernel object */
                struct corelock *ocl = curr->obj_cl;

                if (UNLIKELY(corelock_try_lock(ocl) == 0))
                {
                    /* Need to retry in the correct order though the need is
                     * unlikely */
                    UNLOCK_THREAD(curr);
                    corelock_lock(ocl);
                    LOCK_THREAD(curr);

                    if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
                    {
                        /* Thread was woken or removed explicitly while slot
                         * was unlocked */
                        corelock_unlock(ocl);
                        remove_from_list_tmo(curr);
                        UNLOCK_THREAD(curr);
                        continue;
                    }
                }
#endif /* NUM_CORES */

                remove_from_list_l(curr->bqp, curr);

#ifdef HAVE_WAKEUP_EXT_CB
                if (curr->wakeup_ext_cb != NULL)
                    curr->wakeup_ext_cb(curr);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
                if (curr->blocker != NULL)
                    wakeup_priority_protocol_release(curr);
#endif
                corelock_unlock(ocl);
            }
            /* else state == STATE_SLEEPING */

            remove_from_list_tmo(curr);

            RTR_LOCK(core);

            curr->state = STATE_RUNNING;

            add_to_list_l(&cores[core].running, curr);
            rtr_add_entry(core, curr->priority);

            RTR_UNLOCK(core);
        }

        UNLOCK_THREAD(curr);
    }

    cores[core].next_tmo_check = next_tmo_check;
}

/*---------------------------------------------------------------------------
 * Performs operations that must be done before blocking a thread but after
 * the state is saved.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
{
    struct thread_blk_ops *ops = &cores[core].blk_ops;
    const unsigned flags = ops->flags;

    if (LIKELY(flags == TBOP_CLEAR))
        return;

    switch (flags)
    {
    case TBOP_SWITCH_CORE:
        core_switch_blk_op(core, thread);
        /* Fall-through */
    case TBOP_UNLOCK_CORELOCK:
        corelock_unlock(ops->cl_p);
        break;
    }

    ops->flags = TBOP_CLEAR;
}
#endif /* NUM_CORES > 1 */

#ifdef RB_PROFILE
void profile_thread(void)
{
    profstart(cores[CURRENT_CORE].running - threads);
}
#endif

/*---------------------------------------------------------------------------
 * Prepares a thread to block on an object's list and/or for a specified
 * duration - expects object and slot to be appropriately locked if needed
 * and interrupts to be masked.
 *---------------------------------------------------------------------------
 */
static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
{
    /* If inlined, unreachable branches will be pruned with no size penalty
       because state is passed as a constant parameter. */
    const unsigned int core = IF_COP_CORE(thread->core);

    /* Remove the thread from the list of running threads. */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, thread);
    rtr_subtract_entry(core, thread->priority);
    RTR_UNLOCK(core);

    /* Add a timeout to the block if not infinite */
    switch (state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Put the thread into a new list of inactive threads. */
        add_to_list_l(thread->bqp, thread);

        if (state == STATE_BLOCKED)
            break;

        /* Fall-through */
    case STATE_SLEEPING:
        /* If this thread times out sooner than any other thread, update
           next_tmo_check to its timeout */
        if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
        {
            cores[core].next_tmo_check = thread->tmo_tick;
        }

        if (thread->tmo.prev == NULL)
        {
            add_to_list_tmo(thread);
        }
        /* else thread was never removed from list - just keep it there */
        break;
    }

    /* Remember the next thread about to block. */
    cores[core].block_task = thread;

    /* Report new state. */
    thread->state = state;
}

/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion for any given priority. Any thread
 * that removed itself from the running list first must specify itself in
 * the core's block_task member beforehand.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void switch_thread(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *block = cores[core].block_task;
    struct thread_entry *thread = cores[core].running;

    /* Get context to save - next thread to run is unknown until all wakeups
     * are evaluated */
    if (block != NULL)
    {
        cores[core].block_task = NULL;

#if NUM_CORES > 1
        if (UNLIKELY(thread == block))
        {
            /* This was the last thread running and another core woke us before
             * reaching here. Force next thread selection to give tmo threads or
             * other threads woken before this block a first chance. */
            block = NULL;
        }
        else
#endif
        {
            /* Blocking task is the old one */
            thread = block;
        }
    }

#ifdef RB_PROFILE
    profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
#endif

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    store_context(&thread->context);

    /* Check if the current thread stack is overflown */
    if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
        thread_stkov(thread);

#if NUM_CORES > 1
    /* Run any blocking operations requested before switching/sleeping */
    run_blocking_ops(core, thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
        /* Reset the value of thread's skip count */
        thread->skip_count = 0;
#endif

    for (;;)
    {
        /* If there are threads on a timeout and the earliest wakeup is due,
         * check the list and wake any threads that need to start running
         * again. */
        if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
        {
            check_tmo_threads();
        }

        disable_irq();
        RTR_LOCK(core);

        thread = cores[core].running;

        if (UNLIKELY(thread == NULL))
        {
            /* Enter sleep mode to reduce power usage - woken up on interrupt
             * or wakeup request from another core - expected to enable
             * interrupts. */
            RTR_UNLOCK(core);
            core_sleep(IF_COP(core));
        }
        else
        {
#ifdef HAVE_PRIORITY_SCHEDULING
            /* Select the new task based on priorities and the last time a
             * process got CPU time relative to the highest priority runnable
             * task. */
            struct priority_distribution *pd = &cores[core].rtr;
            int max = find_first_set_bit(pd->mask);

            if (block == NULL)
            {
                /* Not switching on a block, tentatively select next thread */
                thread = thread->l.next;
            }

            for (;;)
            {
                int priority = thread->priority;
                int diff;

                /* This ridiculously simple method of aging seems to work
                 * suspiciously well. It does tend to reward CPU hogs (under
                 * yielding) but that's generally not desirable at all. On
                 * the plus side, it, relatively to other threads, penalizes
                 * excess yielding which is good if some high priority thread
                 * is performing no useful work such as polling for a device
                 * to be ready. Of course, aging is only employed when higher
                 * and lower priority threads are runnable. The highest
                 * priority runnable thread(s) are never skipped unless a
                 * lower-priority process has aged sufficiently. Priorities
                 * of REALTIME class are run strictly according to priority
                 * thus are not subject to switchout due to lower-priority
                 * processes aging; they must give up the processor by going
                 * off the run list. */
                if (LIKELY(priority <= max) ||
                    IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
                    (priority > PRIORITY_REALTIME &&
                     (diff = priority - max,
                      ++thread->skip_count > diff*diff)))
                {
                    cores[core].running = thread;
                    break;
                }

                thread = thread->l.next;
            }
#else
            /* Without priority use a simple FCFS algorithm */
            if (block == NULL)
            {
                /* Not switching on a block, select next thread */
                thread = thread->l.next;
                cores[core].running = thread;
            }
#endif /* HAVE_PRIORITY_SCHEDULING */

            RTR_UNLOCK(core);
            enable_irq();
            break;
        }
    }

    /* And finally give control to the next thread. */
    load_context(&thread->context);

#ifdef RB_PROFILE
    profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
#endif
}

/*---------------------------------------------------------------------------
 * Sleeps a thread for at least a specified number of ticks with zero being
 * a wait until the next tick.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void sleep_thread(int ticks)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    LOCK_THREAD(current);

    /* Set our timeout, remove from run list and join timeout list. */
    current->tmo_tick = current_tick + ticks + 1;
    block_thread_on_l(current, STATE_SLEEPING);

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Indefinitely block a thread on a blocking queue for explicit wakeup.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread(struct thread_entry *current)
{
    /* Set the state to blocked and take us off of the run queue until we
     * are explicitly woken */
    LOCK_THREAD(current);

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Block a thread on a blocking queue for a specified time interval or until
 * explicitly woken - whichever happens first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread_w_tmo(struct thread_entry *current, int timeout)
{
    /* Get the entry for the current running thread. */
    LOCK_THREAD(current);

    /* Set the state to blocked with the specified timeout */
    current->tmo_tick = current_tick + timeout;

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED_W_TMO);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Explicitly wakeup a thread on a blocking queue. Only affects threads of
 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
 *
 * This code should be considered a critical section by the caller meaning
 * that the object's corelock should be held.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread = *list;
    unsigned int result = THREAD_NONE;

    /* Check if there is a blocked thread at all. */
    if (thread == NULL)
        return result;

    LOCK_THREAD(thread);

    /* Determine thread's current state. */
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        remove_from_list_l(list, thread);

        result = THREAD_OK;

#ifdef HAVE_PRIORITY_SCHEDULING
        struct thread_entry *current;
        struct blocker *bl = thread->blocker;

        if (bl == NULL)
        {
            /* No inheritance - just boost the thread by aging */
            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
                thread->skip_count = thread->priority;
            current = cores[CURRENT_CORE].running;
        }
        else
        {
            /* Call the specified unblocking PIP */
            current = bl->wakeup_protocol(thread);
        }

        if (current != NULL &&
            find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
            < current->priority)
        {
            /* There is a thread ready to run of higher or same priority on
             * the same core as the current one; recommend a task switch.
             * Knowing if this is an interrupt call would be helpful here. */
            result |= THREAD_SWITCH;
        }
#endif /* HAVE_PRIORITY_SCHEDULING */

        core_schedule_wakeup(thread);
        break;

    /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
    default:
        THREAD_PANICF("wakeup_thread->block invalid", thread);
    case STATE_RUNNING:
    case STATE_KILLED:
        break;
#endif
    }

    UNLOCK_THREAD(thread);
    return result;
}

/*---------------------------------------------------------------------------
 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
 * from each operation or THREAD_NONE if nothing was awakened. Object owning
 * the queue must be locked first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break; /* No more threads */

        result |= rc;
    }

    return result;
}

/*---------------------------------------------------------------------------
 * Assign the thread slot a new ID. Version is 1-255.
 *---------------------------------------------------------------------------
 */
static void new_thread_id(unsigned int slot_num,
                          struct thread_entry *thread)
{
    unsigned int version =
        (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
                & THREAD_ID_VERSION_MASK;

    /* If wrapped to 0, make it 1 */
    if (version == 0)
        version = 1u << THREAD_ID_VERSION_SHIFT;

    thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
}

/*---------------------------------------------------------------------------
 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
 * will be locked on multicore.
 *---------------------------------------------------------------------------
 */
static struct thread_entry * find_empty_thread_slot(void)
{
    /* Any slot could be on an interrupt-accessible list */
    IF_COP( int oldlevel = disable_irq_save(); )
    struct thread_entry *thread = NULL;
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        /* Obtain current slot state - lock it on multicore */
        struct thread_entry *t = &threads[n];
        LOCK_THREAD(t);

        if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
        {
            /* Slot is empty - leave it locked and caller will unlock */
            thread = t;
            break;
        }

        /* Finished examining slot - no longer busy - unlock on multicore */
        UNLOCK_THREAD(t);
    }

    IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
                                        not accessible to them yet */
    return thread;
}

/*---------------------------------------------------------------------------
 * Return the thread_entry pointer for a thread_id. Return the current
 * thread if the ID is 0 (alias for current).
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
    return (thread_id == THREAD_ID_CURRENT) ?
        cores[CURRENT_CORE].running :
        &threads[thread_id & THREAD_ID_SLOT_MASK];
}

/*---------------------------------------------------------------------------
 * Place the current core in idle mode - woken up on interrupt or wake
 * request from another core.
 *---------------------------------------------------------------------------
 */
void core_idle(void)
{
    IF_COP( const unsigned int core = CURRENT_CORE; )
    disable_irq();
    core_sleep(IF_COP(core));
}

/*---------------------------------------------------------------------------
 * Create a thread. If using a dual core architecture, specify which core to
 * start the thread on.
 *
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core))
{
    unsigned int i;
    unsigned int stack_words;
    uintptr_t stackptr, stackend;
    struct thread_entry *thread;
    unsigned state;
    int oldlevel;

    thread = find_empty_thread_slot();
    if (thread == NULL)
    {
        return 0;
    }

    oldlevel = disable_irq_save();

    /* Munge the stack to make it easy to spot stack overflows */
    stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
    stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
    stack_size = stackend - stackptr;
    stack_words = stack_size / sizeof (uintptr_t);

    for (i = 0; i < stack_words; i++)
    {
        ((uintptr_t *)stackptr)[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread->name = name;
    thread->stack = (uintptr_t *)stackptr;
    thread->stack_size = stack_size;
    thread->queue = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->cpu_boost = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    memset(&thread->pdist, 0, sizeof(thread->pdist));
    thread->blocker = NULL;
    thread->base_priority = priority;
    thread->priority = priority;
    thread->skip_count = priority;
    prio_add_entry(&thread->pdist, priority);
#endif

#ifdef HAVE_IO_PRIORITY
    /* Default to high (foreground) priority */
    thread->io_priority = IO_PRIORITY_IMMEDIATE;
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        commit_dcache();
    }
#endif

    /* Thread is not on any timeout list but be a bit paranoid */
    thread->tmo.prev = NULL;

    state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;

    thread->context.sp = (typeof (thread->context.sp))stackend;

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    thread->state = state;
    i = thread->id; /* Snapshot while locked */

    if (state == STATE_RUNNING)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

    return i;
}

#ifdef HAVE_SCHEDULER_BOOSTCTRL
/*---------------------------------------------------------------------------
 * Change the boost state of a thread boosting or unboosting the CPU
 * as required.
 *---------------------------------------------------------------------------
 */
static inline void boost_thread(struct thread_entry *thread, bool boost)
{
    if ((thread->cpu_boost != 0) != boost)
    {
        thread->cpu_boost = boost;
        boost_cpu(boost);
    }
}

void trigger_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, true);
}

void cancel_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, false);
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */

/*---------------------------------------------------------------------------
 * Block the current thread until another thread terminates. A thread may
 * wait on itself to terminate which prevents it from running again and it
 * will need to be killed externally.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void thread_wait(unsigned int thread_id)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* Lock thread-as-waitable-object lock */
    corelock_lock(&thread->waiter_cl);

    /* Be sure it hasn't been killed yet */
    if (thread_id == THREAD_ID_CURRENT ||
        (thread->id == thread_id && thread->state != STATE_KILLED))
    {
        IF_COP( current->obj_cl = &thread->waiter_cl; )
        current->bqp = &thread->queue;

        disable_irq();
        block_thread(current);

        corelock_unlock(&thread->waiter_cl);

        switch_thread();
        return;
    }

    corelock_unlock(&thread->waiter_cl);
}

/*---------------------------------------------------------------------------
 * Exit the current thread. The Right Way to Do Things (TM).
 *---------------------------------------------------------------------------
 */
/* This is done to foil optimizations that may require the current stack,
 * such as optimizing subexpressions that put variables on the stack that
 * get used after switching stacks. */
#if NUM_CORES > 1
/* Called by ASM stub */
static void thread_final_exit_do(struct thread_entry *current)
#else
/* No special procedure is required before calling */
static inline void thread_final_exit(struct thread_entry *current)
#endif
{
    /* At this point, this thread isn't using resources allocated for
     * execution except the slot itself. */

    /* Signal this thread */
    thread_queue_wake(&current->queue);
    corelock_unlock(&current->waiter_cl);
    switch_thread();
    /* This should never and must never be reached - if it is, the
     * state is corrupted */
    THREAD_PANICF("thread_exit->K:*R", current);
    while (1);
}

void thread_exit(void)
{
    register struct thread_entry * current = cores[CURRENT_CORE].running;

    /* Cancel CPU boost if any */
    cancel_cpu_boost();

    disable_irq();

    corelock_lock(&current->waiter_cl);
    LOCK_THREAD(current);

#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        corelock_unlock(&current->waiter_cl);
        thread_wait(id);
        THREAD_PANICF("thread_exit->WK:*R", current);
    }
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("thread_exit", current);
#endif

    if (current->tmo.prev != NULL)
    {
        /* Cancel pending timeout list removal */
        remove_from_list_tmo(current);
    }

    /* Switch tasks and never return */
    block_thread_on_l(current, STATE_KILLED);

    /* Slot must be unusable until thread is really gone */
    UNLOCK_THREAD_AT_TASK_SWITCH(current);

    /* Update ID for this slot */
    new_thread_id(current->id, current);
    current->name = NULL;

    /* Do final cleanup and remove the thread */
    thread_final_exit(current);
}

#ifdef ALLOW_REMOVE_THREAD
/*---------------------------------------------------------------------------
 * Remove a thread from the scheduler. Not The Right Way to Do Things in
 * normal code.
 *
 * Parameter is the ID as returned from create_thread().
 *
 * Use with care on threads that are not under careful control as this may
 * leave various objects in an undefined state.
 *---------------------------------------------------------------------------
 */
void remove_thread(unsigned int thread_id)
{
#if NUM_CORES > 1
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
    struct corelock *ocl = NULL;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    struct thread_entry *current = cores[core].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    unsigned state;
    int oldlevel;

    if (thread == current)
        thread_exit(); /* Current thread - do normal exit */

    oldlevel = disable_irq_save();

    corelock_lock(&thread->waiter_cl);
    LOCK_THREAD(thread);

    state = thread->state;

    if (thread->id != thread_id || state == STATE_KILLED)
        goto thread_killed;

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(thread);
        corelock_unlock(&thread->waiter_cl);
        restore_irq(oldlevel);
        thread_wait(thread_id);
        return;
    }

    thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("remove_thread", thread);
#endif

    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur which means other
         * threads have to be guided into becoming thread waiters if they
         * attempt to remove it. */
        unsigned int new_core = thread->core;

        corelock_unlock(&thread->waiter_cl);

        UNLOCK_THREAD(thread);
        restore_irq(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = disable_irq_save();

        corelock_lock(&thread->waiter_cl);
        LOCK_THREAD(thread);

        state = thread->state;
        core = new_core;
        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
         * run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Cancel CPU boost if any */
    boost_thread(thread, false);
#endif

IF_COP( retry_state: )

    switch (state)
    {
    case STATE_RUNNING:
        RTR_LOCK(core);
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        rtr_subtract_entry(core, thread->priority);
        RTR_UNLOCK(core);
        break;
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#if NUM_CORES > 1
        if (&thread->waiter_cl != thread->obj_cl)
        {
            ocl = thread->obj_cl;

            if (UNLIKELY(corelock_try_lock(ocl) == 0))
            {
                UNLOCK_THREAD(thread);
                corelock_lock(ocl);
                LOCK_THREAD(thread);

                if (UNLIKELY(thread->state != state))
                {
                    /* Something woke the thread */
                    state = thread->state;
                    corelock_unlock(ocl);
                    goto retry_state;
                }
            }
        }
#endif
        remove_from_list_l(thread->bqp, thread);

#ifdef HAVE_WAKEUP_EXT_CB
        if (thread->wakeup_ext_cb != NULL)
            thread->wakeup_ext_cb(thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
        if (thread->blocker != NULL)
        {
            /* Remove thread's priority influence from its chain */
            wakeup_priority_protocol_release(thread);
        }
#endif

#if NUM_CORES > 1
        if (ocl != NULL)
            corelock_unlock(ocl);
#endif
        break;
    /* Otherwise thread is frozen and hasn't run yet */
    }

    new_thread_id(thread_id, thread);
    thread->state = STATE_KILLED;

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake(&thread->queue);

    thread->name = NULL;

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock and reenable interrupts */
    corelock_unlock(&thread->waiter_cl);
    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

#if NUM_CORES > 1
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           native core */
        switch_core(old_core);
    }
#endif
}
#endif /* ALLOW_REMOVE_THREAD */

#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative base priority for the core it runs on. Any
 * needed inheritance changes also may happen.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(unsigned int thread_id, int priority)
{
    int old_base_priority = -1;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* A little safety measure */
    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
        return -1;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* Make sure it's not killed */
    if (thread_id == THREAD_ID_CURRENT ||
        (thread->id == thread_id && thread->state != STATE_KILLED))
    {
        int old_priority = thread->priority;

        old_base_priority = thread->base_priority;
        thread->base_priority = priority;

        prio_move_entry(&thread->pdist, old_base_priority, priority);
        priority = find_first_set_bit(thread->pdist.mask);

        if (old_priority == priority)
        {
            /* No priority change - do nothing */
        }
        else if (thread->state == STATE_RUNNING)
        {
            /* This thread is running - change location on the run
             * queue. No transitive inheritance needed. */
            set_running_thread_priority(thread, priority);
        }
        else
        {
            thread->priority = priority;

            if (thread->blocker != NULL)
            {
                /* Bubble new priority down the chain */
                struct blocker *bl = thread->blocker;   /* Blocker struct */
                struct thread_entry *bl_t = bl->thread; /* Blocking thread */
                struct thread_entry * const tstart = thread; /* Initial thread */
                const int highest = MIN(priority, old_priority); /* Higher of new or old */

                for (;;)
                {
                    struct thread_entry *next; /* Next thread to check */
                    int bl_pr;    /* Highest blocked thread */
                    int queue_pr; /* New highest blocked thread */
#if NUM_CORES > 1
                    /* Owner can change but thread cannot be dislodged - thread
                     * may not be the first in the queue which allows other
                     * threads ahead in the list to be given ownership during the
                     * operation. If thread is next then the waker will have to
                     * wait for us and the owner of the object will remain fixed.
                     * If we successfully grab the owner -- which at some point
                     * is guaranteed -- then the queue remains fixed until we
                     * pass by. */
                    for (;;)
                    {
                        LOCK_THREAD(bl_t);

                        /* Double-check the owner - retry if it changed */
                        if (LIKELY(bl->thread == bl_t))
                            break;

                        UNLOCK_THREAD(bl_t);
                        bl_t = bl->thread;
                    }
#endif
                    bl_pr = bl->priority;

                    if (highest > bl_pr)
                        break; /* Object priority won't change */

                    /* This will include the thread being set */
                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);

                    if (queue_pr == bl_pr)
                        break; /* Object priority not changing */

                    /* Update thread boost for this object */
                    bl->priority = queue_pr;
                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
                    bl_pr = find_first_set_bit(bl_t->pdist.mask);

                    if (bl_t->priority == bl_pr)
                        break; /* Blocking thread priority not changing */

                    if (bl_t->state == STATE_RUNNING)
                    {
                        /* Thread not blocked - we're done */
                        set_running_thread_priority(bl_t, bl_pr);
                        break;
                    }

                    bl_t->priority = bl_pr;
                    bl = bl_t->blocker; /* Blocking thread has a blocker? */

                    if (bl == NULL)
                        break; /* End of chain */

                    next = bl->thread;

                    if (UNLIKELY(next == tstart))
                        break; /* Full-circle */

                    UNLOCK_THREAD(thread);

                    thread = bl_t;
                    bl_t = next;
                }

                UNLOCK_THREAD(bl_t);
            }
        }
    }

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return old_base_priority;
}
/*---------------------------------------------------------------------------
 * Returns the current base priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int base_priority = thread->base_priority;

    /* Simply check without locking slot. It may or may not be valid by the
     * time the function returns anyway. If all tests pass, it is the
     * correct value for when it was valid. */
    if (thread_id != THREAD_ID_CURRENT &&
        (thread->id != thread_id || thread->state == STATE_KILLED))
        base_priority = -1;

    return base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
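
/* Illustrative sketch (not part of the scheduler): one way a caller might
 * temporarily boost its own base priority around a latency-critical section
 * and then restore it. Assumes HAVE_PRIORITY_SCHEDULING and the PRIORITY_*
 * constants from thread.h; disabled so it never affects the build. */
#if 0
static void example_priority_boost(void)
{
    /* Boost the calling thread; the previous base priority is returned */
    int old = thread_set_priority(THREAD_ID_CURRENT, PRIORITY_REALTIME);

    /* ... latency-critical work ... */

    /* Restore the previous base priority */
    thread_set_priority(THREAD_ID_CURRENT, old);

    /* thread_get_priority() reads the base priority without locking */
    int base = thread_get_priority(THREAD_ID_CURRENT);
    (void)base;
}
#endif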
#ifdef HAVE_IO_PRIORITY
int thread_get_io_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    return thread->io_priority;
}

void thread_set_io_priority(unsigned int thread_id, int io_priority)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    thread->io_priority = io_priority;
}
#endif /* HAVE_IO_PRIORITY */
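
/* Illustrative sketch: marking a background worker so its storage accesses
 * defer to foreground I/O. Assumes HAVE_IO_PRIORITY and that
 * IO_PRIORITY_BACKGROUND is the background constant declared in thread.h on
 * targets supporting it; the worker_id value is a placeholder. Disabled so
 * it never affects the build. */
#if 0
static void example_io_priority(unsigned int worker_id)
{
    thread_set_io_priority(worker_id, IO_PRIORITY_BACKGROUND);

    int iop = thread_get_io_priority(worker_id);
    (void)iop;
}
#endif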
/*---------------------------------------------------------------------------
 * Starts a frozen thread - similar semantics to wakeup_thread except that
 * the thread is on no scheduler or wakeup queue at all. It exists simply by
 * virtue of the slot having a state of STATE_FROZEN.
 *---------------------------------------------------------------------------
 */
void thread_thaw(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* If thread is the current one, it cannot be frozen, therefore
     * there is no need to check that. */
    if (thread->id == thread_id && thread->state == STATE_FROZEN)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);
}
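
/* Illustrative sketch: create a thread frozen, finish whatever setup it
 * depends on, then release it with thread_thaw(). Assumes the create_thread()
 * declaration and CREATE_THREAD_FROZEN flag from thread.h; the stack, entry
 * function and priority here are placeholders. Disabled so it never affects
 * the build. */
#if 0
static long example_stack[DEFAULT_STACK_SIZE/sizeof(long)];

static void example_entry(void)
{
    /* ... thread body ... */
}

static void example_start_frozen(void)
{
    unsigned int id = create_thread(example_entry,
                                    example_stack, sizeof(example_stack),
                                    CREATE_THREAD_FROZEN, "example"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));

    /* ... perform initialization the new thread relies upon ... */

    thread_thaw(id); /* STATE_FROZEN -> scheduled to run */
}
#endif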
/*---------------------------------------------------------------------------
 * Return the ID of the currently executing thread.
 *---------------------------------------------------------------------------
 */
unsigned int thread_get_current(void)
{
    return cores[CURRENT_CORE].running->id;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;
    if (core == new_core)
    {
        /* No change - just return same core */
        return core;
    }

    int oldlevel = disable_irq_save();
    LOCK_THREAD(current);
    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - deactivate and let process complete */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        restore_irq(oldlevel);
        thread_wait(id);
        /* Should never be reached */
        THREAD_PANICF("switch_core->D:*R", current);
    }
    /* Get us off the running list for the current core */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, current);
    rtr_subtract_entry(core, current->priority);
    RTR_UNLOCK(core);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;
    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the list unlock to keep the other core stuck
     * until this thread is ready. */
    RTR_LOCK(new_core);

    rtr_add_entry(new_core, current->priority);
    add_to_list_l(&cores[new_core].running, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
    cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
    cores[core].block_task = current;

    UNLOCK_THREAD(current);

    /* Alert other core to activity */
    core_wake(new_core);

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

    /* Finally return the old core to caller */
    return current->retval;
}
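
/* Illustrative sketch: push the current thread over to the coprocessor for a
 * heavy stretch of work, then migrate back. switch_core() returns the core
 * the thread was running on before the switch. Assumes a dual-core target
 * with the usual CPU/COP identifiers; the work itself is a placeholder.
 * Disabled so it never affects the build. */
#if 0
static void example_run_on_cop(void)
{
    unsigned int old_core = switch_core(COP);

    /* ... processing best kept off the main core ... */

    switch_core(old_core); /* migrate back to where we started */
}
#endif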
#endif /* NUM_CORES > 1 */
/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;
    if (core == CPU)
    {
        /* Initialize core locks and IDs in all slots */
        int n;
        for (n = 0; n < MAXTHREADS; n++)
        {
            thread = &threads[n];
            corelock_init(&thread->waiter_cl);
            corelock_init(&thread->slot_cl);
            thread->id = THREAD_ID_INIT(n);
        }
    }

    /* CPU will initialize first and then sleep */
    thread = find_empty_thread_slot();

    if (thread == NULL)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zeroed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }
    /* Initialize initially non-zero members of core */
    cores[core].next_tmo_check = current_tick; /* Something not in the past */

    /* Initialize initially non-zero members of slot */
    UNLOCK_THREAD(thread); /* No sync worries yet */
    thread->name = main_thread_name;
    thread->state = STATE_RUNNING;
    IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
    corelock_init(&cores[core].rtr_cl);
    thread->base_priority = PRIORITY_USER_INTERFACE;
    prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
    thread->priority = PRIORITY_USER_INTERFACE;
    rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif

    add_to_list_l(&cores[core].running, thread);
    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
        /* Wait for other processors to finish their inits since create_thread
         * isn't safe to call until the kernel inits are done. The first
         * threads created in the system must of course be created by CPU.
         * Another possible approach is to initialize all cores and slots
         * for each core by CPU, let the remainder proceed in parallel and
         * signal CPU when all are finished. */
        core_thread_init(CPU);
    }
    else
    {
        /* Initial stack is the idle stack */
        thread->stack = idle_stacks[core];
        thread->stack_size = IDLE_STACK_SIZE;
        /* After last processor completes, it should signal all others to
         * proceed or may signal the next and call thread_exit(). The last one
         * to finish will signal CPU. */
        core_thread_init(core);
        /* Other cores do not have a main thread - go idle inside switch_thread
         * until a thread can run on the core. */
        thread_exit();
#endif /* NUM_CORES */
    }
}
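
/* Illustrative sketch of the intended boot ordering on the main core: run the
 * threading init (still with interrupts disabled), finish the remaining
 * kernel init, and only then start creating threads. The surrounding function
 * and its other steps are placeholders, not code from this file. Disabled so
 * it never affects the build. */
#if 0
static void example_boot_order(void)
{
    init_threads();     /* main thread now owns a slot and is running */

    /* ... remaining kernel/target init (tick, queues, drivers) ... */

    /* create_thread() calls are safe from this point on */
}
#endif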
/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES == 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    unsigned int i;
    int usage = 0;

    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 * overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    if (LIKELY(thread->stack_size > 0))
        return stack_usage(thread->stack, thread->stack_size);

    return 0;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif /* NUM_CORES > 1 */
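
/* Illustrative sketch: dump the stack high-water marks, e.g. from a debug
 * screen. Walking threads[] directly and treating a non-NULL name as "slot in
 * use" is an assumption made only for this sketch; DEBUGF is the usual debug
 * print macro. Disabled so it never affects the build. */
#if 0
static void example_dump_stack_usage(void)
{
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        struct thread_entry *t = &threads[n];
        if (t->name != NULL)
            DEBUGF("%s: %d%%\n", t->name, thread_stack_usage(t));
    }

#if NUM_CORES > 1
    DEBUGF("idle (core 0): %d%%\n", idle_stack_usage(0));
#endif
}
#endif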
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";
        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX";
        }
        snprintf(buffer, size, fmt, name);
    }
}
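
/* Illustrative sketch: format one line for a thread debug screen using
 * thread_get_name(), which falls back to a formatted ID when the slot has no
 * usable name. The buffer size and DEBUGF output are placeholders. Disabled
 * so it never affects the build. */
#if 0
static void example_print_thread(struct thread_entry *thread)
{
    char namebuf[32];

    thread_get_name(namebuf, sizeof(namebuf), thread);
    DEBUGF("thread %s state %u\n", namebuf, (unsigned)thread->state);
}
#endif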