/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/****************************************************************************
 * See notes below on implementing processor-specific portions!            *
 ***************************************************************************/
/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif
/****************************************************************************
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever waiting for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Core Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 */
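
/* Illustrative sketch of the order above - not part of the scheduler.
 * "struct kernel_object" is a hypothetical stand-in for e.g. an event
 * queue carrying its own corelock. Locks are taken 1) -> 4) and released
 * in reverse: */
#if 0
static void locking_order_sketch(struct kernel_object *obj,
                                 struct thread_entry *thread)
{
    int oldlevel = disable_irq_save(); /* 1) IRQ           */
    corelock_lock(&obj->cl);           /* 2) kernel object */
    LOCK_THREAD(thread);               /* 3) thread slot   */
    RTR_LOCK(thread->core);            /* 4) core lists    */

    /* ... perform the thread state change ... */

    RTR_UNLOCK(thread->core);
    UNLOCK_THREAD(thread);
    corelock_unlock(&obj->cl);
    restore_irq(oldlevel);
}
#endif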
/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * ARM notes:
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others.
 * Usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
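
/* Hedged sketch of the recipe above for a hypothetical single-core target
 * whose sleep instruction wakes on a masked interrupt (so step 2 is not
 * needed); "sleep_core()" is an assumed CPU primitive, and the real
 * implementations live in the processor-specific thread-*.c files included
 * further down. */
#if 0
static inline void core_sleep(void)
{
    sleep_core();  /* step 3: stop the core until an interrupt is pended */
    enable_irq();  /* step 4: let the pending wakeup interrupt run       */
}
#endif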
/* Cast to the machine pointer size, whose size could be < 4 or > 32 */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
static struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];

static inline void core_sleep(IF_COP_VOID(unsigned int core))
    __attribute__((always_inline));

void check_tmo_threads(void)
    __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
    __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
    __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
    __attribute__((noinline));

static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
    __attribute__((always_inline));

static void thread_stkov(struct thread_entry *thread)
    __attribute__((noinline));

static inline void store_context(void* addr)
    __attribute__((always_inline));

static inline void load_context(const void* addr)
    __attribute__((always_inline));

static void thread_final_exit_do(struct thread_entry *current)
    __attribute__((noinline, noreturn, used));

static inline void thread_final_exit(struct thread_entry *current)
    __attribute__((always_inline, noreturn));

void switch_thread(void)
    __attribute__((noinline));
/****************************************************************************
 * Processor-specific section - include necessary core support
 ****************************************************************************/

#if (CONFIG_PLATFORM & PLATFORM_ANDROID)
#include "thread-android-arm.c"
#elif defined(CPU_ARM)
#include "thread-arm.c"
#if defined (CPU_PP)
#include "thread-pp.c"
#endif /* CPU_PP */
#elif defined(CPU_COLDFIRE)
#include "thread-coldfire.c"
#elif CONFIG_CPU == SH7034
#include "thread-sh.c"
#elif defined(CPU_MIPS) && CPU_MIPS == 32
#include "thread-mips32.c"
#else
/* Wouldn't compile anyway */
#error Processor not implemented.
#endif /* CONFIG_CPU == */

#ifndef IF_NO_SKIP_YIELD
#define IF_NO_SKIP_YIELD(...)
#endif

/****************************************************************************
 * End Processor-specific section
 ***************************************************************************/
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}

static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}

#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
/* Thread locking */
#if NUM_CORES > 1

#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })

#else /* NUM_CORES == 1 */

#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ true })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })

#endif /* NUM_CORES */

/* RTR list */
#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })
#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))

#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))

#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else /* !HAVE_PRIORITY_SCHEDULING */
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *
 *     +------------------------------+
 *     |                              |
 *     +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *     +->+---+->+---+->+---+->+---+--+
 *     |                              |
 *     +------------------------------+
 *---------------------------------------------------------------------------
 */
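
/* A sketch of the traversal idiom this layout supports (the same loop shape
 * appears in find_highest_priority_in_list_l() further down): start at any
 * node, follow l.next, and stop upon returning to the starting node. */
#if 0
static void visit_all_l_sketch(struct thread_entry *head)
{
    struct thread_entry *curr = head;
    do
    {
        /* ... visit curr ... */
        curr = curr->l.next;
    }
    while (curr != head);
}
#endif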
/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}
/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == next)
    {
        /* The only item */
        *list = NULL;
        return;
    }

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    next->l.prev = prev;
    prev->l.next = next;
}
/*---------------------------------------------------------------------------
 * Timeout list structure - circular reverse (to make "remove item" O(1)),
 * NULL-terminated forward (to ease the far more common forward traversal):
 *
 *     +------------------------------+
 *     |                              |
 *     +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *        +---+->+---+->+---+->+---+-X
 *---------------------------------------------------------------------------
 */
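
/* Sketch of what the asymmetric linking buys (illustrative only): the
 * common forward walk simply ends at NULL, while the reverse links stay
 * circular so the head's tmo.prev reaches the tail in O(1). */
#if 0
static void tmo_list_sketch(struct thread_entry *head)
{
    struct thread_entry *t;

    /* Forward traversal - NULL-terminated */
    for (t = head; t != NULL; t = t->tmo.next)
    {
        /* ... visit t ... */
    }

    /* Tail access without a full walk - reverse links are circular */
    struct thread_entry *tail = head->tmo.prev;
    (void)tail;
}
#endif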
/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
    THREAD_ASSERT(thread->tmo.prev == NULL,
                  "add_to_list_tmo->already listed", thread);

    thread->tmo.next = NULL;

    if (tmo == NULL)
    {
        /* Insert into unoccupied list */
        thread->tmo.prev = thread;
        cores[IF_COP_CORE(thread->core)].timeout = thread;
        return;
    }

    /* Insert last */
    thread->tmo.prev = tmo->tmo.prev;
    tmo->tmo.prev->tmo.next = thread;
    tmo->tmo.prev = thread;
}
/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is cancelled.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
    struct thread_entry *prev = thread->tmo.prev;
    struct thread_entry *next = thread->tmo.next;

    THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);

    if (next != NULL)
        next->tmo.prev = prev;

    if (thread == *list)
    {
        /* List becomes next item and empty if next == NULL */
        *list = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
    else
    {
        if (next == NULL)
            (*list)->tmo.prev = prev;
        prev->tmo.next = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
}
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Priority distribution structure (one category for each possible priority):
 *
 *       +----+----+----+ ... +-----+
 * hist: | F0 | F1 | F2 |     | F31 |
 *       +----+----+----+ ... +-----+
 * mask: | b0 | b1 | b2 |     | b31 |
 *       +----+----+----+ ... +-----+
 *
 * F = count of threads at priority category n (frequency)
 * b = bitmask of non-zero priority categories (occupancy)
 *
 *        / if F[n] != 0 : 1
 * b[n] = |
 *        \ else         : 0
 *
 *---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
 *
 * Mn = mutex n, Tn = thread n
 *
 * A lower priority thread inherits the priority of the highest priority
 * thread blocked waiting for it to complete an action (such as release a
 * mutex or respond to a message via queue_send):
 *
 * 1) T2->M1->T1
 *
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1 then T1 inherits the priority of T2.
 *
 * 2) T3
 *    \/
 *    T2->M1->T1
 *
 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
 * T1 inherits the higher of T2 and T3.
 *
 * 3) T3->M2->T2->M1->T1
 *
 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
 * then T1 inherits the priority of T3 through T2.
 *
 * Blocking chains can grow arbitrarily complex (though it's best that they
 * not form at all very often :) and build up from these units.
 *---------------------------------------------------------------------------
 */
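
/* Minimal sketch of the pdist bookkeeping the functions below implement,
 * assuming an owner thread with base priority 20 and a waiter at priority
 * 5 (lower number = more urgent). The owner's effective priority is always
 * the lowest set bit of the occupancy mask. */
#if 0
static void pip_bookkeeping_sketch(struct thread_entry *owner)
{
    prio_add_entry(&owner->pdist, 5); /* waiter at priority 5 blocks */
    owner->priority = find_first_set_bit(owner->pdist.mask); /* -> 5  */

    prio_subtract_entry(&owner->pdist, 5); /* waiter is woken         */
    owner->priority = find_first_set_bit(owner->pdist.mask); /* -> 20 */
}
#endif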
/*---------------------------------------------------------------------------
 * Increment frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_add_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;
    /* Enough size/instruction count difference for ARM makes it worth it to
     * use different code (192 bytes for ARM). Only thing better is ASM. */
#ifdef CPU_ARM
    count = pd->hist[priority];
    if (++count == 1)
        pd->mask |= 1 << priority;
    pd->hist[priority] = count;
#else /* This one's better for Coldfire */
    if ((count = ++pd->hist[priority]) == 1)
        pd->mask |= 1 << priority;
#endif

    return count;
}
/*---------------------------------------------------------------------------
 * Decrement frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_subtract_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;

#ifdef CPU_ARM
    count = pd->hist[priority];
    if (--count == 0)
        pd->mask &= ~(1 << priority);
    pd->hist[priority] = count;
#else
    if ((count = --pd->hist[priority]) == 0)
        pd->mask &= ~(1 << priority);
#endif

    return count;
}
/*---------------------------------------------------------------------------
 * Remove from one category and add to another
 *---------------------------------------------------------------------------
 */
static inline void prio_move_entry(
    struct priority_distribution *pd, int from, int to)
{
    uint32_t mask = pd->mask;

#ifdef CPU_ARM
    unsigned int count;

    count = pd->hist[from];
    if (--count == 0)
        mask &= ~(1 << from);
    pd->hist[from] = count;

    count = pd->hist[to];
    if (++count == 1)
        mask |= 1 << to;
    pd->hist[to] = count;
#else
    if (--pd->hist[from] == 0)
        mask &= ~(1 << from);

    if (++pd->hist[to] == 1)
        mask |= 1 << to;
#endif

    pd->mask = mask;
}
/*---------------------------------------------------------------------------
 * Change the priority and rtr entry for a running thread
 *---------------------------------------------------------------------------
 */
static inline void set_running_thread_priority(
    struct thread_entry *thread, int priority)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    RTR_LOCK(core);
    rtr_move_entry(core, thread->priority, priority);
    thread->priority = priority;
    RTR_UNLOCK(core);
}
/*---------------------------------------------------------------------------
 * Finds the highest priority thread in a list of threads. If the list is
 * empty, PRIORITY_IDLE is returned.
 *
 * It is possible to use the struct priority_distribution within an object
 * instead of scanning the remaining threads in the list but as a compromise,
 * the resulting per-object memory overhead is saved at a slight speed
 * penalty under high contention.
 *---------------------------------------------------------------------------
 */
static int find_highest_priority_in_list_l(
    struct thread_entry * const thread)
{
    if (LIKELY(thread != NULL))
    {
        /* Go through the list until ending up back at the initial thread */
        int highest_priority = thread->priority;
        struct thread_entry *curr = thread;

        do
        {
            int priority = curr->priority;

            if (priority < highest_priority)
                highest_priority = priority;

            curr = curr->l.next;
        }
        while (curr != thread);

        return highest_priority;
    }

    return PRIORITY_IDLE;
}
/*---------------------------------------------------------------------------
 * Register priority with blocking system and bubble it down the chain if
 * any until we reach the end or something is already equal or higher.
 *
 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
 * targets but that same action also guarantees a circular block anyway and
 * those are prevented, right? :-)
 *---------------------------------------------------------------------------
 */
static struct thread_entry *
blocker_inherit_priority(struct thread_entry *current)
{
    const int priority = current->priority;
    struct blocker *bl = current->blocker;
    struct thread_entry * const tstart = current;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since the object protection is held */
    LOCK_THREAD(bl_t);

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority >= bl_pr)
            break; /* Object priority already high enough */

        bl->priority = priority;

        /* Add this one */
        prio_add_entry(&bl_t->pdist, priority);

        if (bl_pr < PRIORITY_IDLE)
        {
            /* Not first waiter - subtract old one */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
        }

        if (priority >= bl_t->priority)
            break; /* Thread priority high enough */

        if (bl_t->state == STATE_RUNNING)
        {
            /* Blocking thread is a running thread therefore there are no
             * further blockers. Change the "run queue" on which it
             * resides. */
            set_running_thread_priority(bl_t, priority);
            break;
        }

        bl_t->priority = priority;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(current);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            bl = bl_t->blocker;
        }
#endif
        current = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    return current;
}
/*---------------------------------------------------------------------------
 * Readjust priorities when waking a thread blocked waiting for another
 * in essence "releasing" the thread's effect on the object owner. Can be
 * performed from any context.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
wakeup_priority_protocol_release(struct thread_entry *thread)
{
    const int priority = thread->priority;
    struct blocker *bl = thread->blocker;
    struct thread_entry * const tstart = thread;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since object will be locked */
    LOCK_THREAD(bl_t);

    thread->blocker = NULL; /* Thread not blocked */

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority > bl_pr)
            break; /* Object priority higher */

        next = *thread->bqp;

        if (next == NULL)
        {
            /* No more threads in queue */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
            bl->priority = PRIORITY_IDLE;
        }
        else
        {
            /* Check list for highest remaining priority */
            int queue_pr = find_highest_priority_in_list_l(next);

            if (queue_pr == bl_pr)
                break; /* Object priority not changing */

            /* Change queue priority */
            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
            bl->priority = queue_pr;
        }

        if (bl_pr > bl_t->priority)
            break; /* thread priority is higher */

        bl_pr = find_first_set_bit(bl_t->pdist.mask);

        if (bl_pr == bl_t->priority)
            break; /* Thread priority not changing */

        if (bl_t->state == STATE_RUNNING)
        {
            /* No further blockers */
            set_running_thread_priority(bl_t, bl_pr);
            break;
        }

        bl_t->priority = bl_pr;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(thread);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            bl = bl_t->blocker;
        }
#endif
        thread = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    if (UNLIKELY(thread != tstart))
    {
        /* Relock original if it changed */
        LOCK_THREAD(tstart);
    }

    return cores[CURRENT_CORE].running;
}
/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
 * blocking chains may only unblock from the very end.
 *
 * Only the owning thread itself may call this and so the assumption that
 * it is the running thread is made.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner */
    struct blocker *bl = thread->blocker;
    struct thread_entry *bl_t = bl->thread;
    struct thread_entry *next;
    int bl_pr;

    THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
                  "UPPT->wrong thread", cores[CURRENT_CORE].running);

    LOCK_THREAD(bl_t);

    bl_pr = bl->priority;

    /* Remove the object's boost from the owning thread */
    if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
        bl_pr <= bl_t->priority)
    {
        /* No more threads at this priority are waiting and the old level is
         * at least the thread level */
        int priority = find_first_set_bit(bl_t->pdist.mask);

        if (priority != bl_t->priority)
        {
            /* Adjust this thread's priority */
            set_running_thread_priority(bl_t, priority);
        }
    }

    next = *thread->bqp;

    if (LIKELY(next == NULL))
    {
        /* Expected shortcut - no more waiters */
        bl_pr = PRIORITY_IDLE;
    }
    else
    {
        if (thread->priority <= bl_pr)
        {
            /* Need to scan threads remaining in queue */
            bl_pr = find_highest_priority_in_list_l(next);
        }

        if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
            bl_pr < thread->priority)
        {
            /* Thread priority must be raised */
            thread->priority = bl_pr;
        }
    }

    bl->thread = thread;    /* This thread pwns */
    bl->priority = bl_pr;   /* Save highest blocked priority */
    thread->blocker = NULL; /* Thread not blocked */

    UNLOCK_THREAD(bl_t);

    return bl_t;
}
/*---------------------------------------------------------------------------
 * No threads must be blocked waiting for this thread except for it to exit.
 * The alternative is more elaborate cleanup and object registration code.
 * Check this for risk of silent data corruption when objects with
 * inheritable blocking are abandoned by the owner - not precise but may
 * catch something.
 *---------------------------------------------------------------------------
 */
static void __attribute__((noinline)) check_for_obj_waiters(
    const char *function, struct thread_entry *thread)
{
    /* Only one bit in the mask should be set with a frequency of 1 which
     * represents the thread's own base priority */
    uint32_t mask = thread->pdist.mask;
    if ((mask & (mask - 1)) != 0 ||
        thread->pdist.hist[find_first_set_bit(mask)] > 1)
    {
        unsigned char name[32];
        thread_get_name(name, 32, thread);
        panicf("%s->%s with obj. waiters", function, name);
    }
}
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
 * Move a thread back to a running state on its core.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);

    RTR_LOCK(core);

    thread->state = STATE_RUNNING;

    add_to_list_l(&cores[core].running, thread);
    rtr_add_entry(core, thread->priority);

    RTR_UNLOCK(core);

#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}
/*---------------------------------------------------------------------------
 * Check the core's timeout list when at least one thread is due to wake.
 * Filtering for the condition is done before making the call. Resets the
 * tick when the next check will occur.
 *---------------------------------------------------------------------------
 */
void check_tmo_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *next = cores[core].timeout;

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */

    /* Break the loop once we have walked through the list of all
     * sleeping processes or have removed them all. */
    while (next != NULL)
    {
        /* Check sleeping threads. Allow interrupts between checks. */
        enable_irq();

        struct thread_entry *curr = next;

        next = curr->tmo.next;

        /* Lock thread slot against explicit wakeup */
        disable_irq();
        LOCK_THREAD(curr);

        unsigned state = curr->state;

        if (state < TIMEOUT_STATE_FIRST)
        {
            /* Cleanup threads no longer on a timeout but still on the
             * list. */
            remove_from_list_tmo(curr);
        }
        else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
        {
            /* Timeout still pending - this will be the usual case */
            if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
            {
                /* Earliest timeout found so far - move the next check up
                   to its time */
                next_tmo_check = curr->tmo_tick;
            }
        }
        else
        {
            /* Sleep timeout has been reached so bring the thread back to
             * life again. */
            if (state == STATE_BLOCKED_W_TMO)
            {
#if NUM_CORES > 1
                /* Lock the waiting thread's kernel object */
                struct corelock *ocl = curr->obj_cl;

                if (UNLIKELY(corelock_try_lock(ocl) == 0))
                {
                    /* Need to retry in the correct order though the need is
                     * unlikely */
                    UNLOCK_THREAD(curr);
                    corelock_lock(ocl);
                    LOCK_THREAD(curr);

                    if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
                    {
                        /* Thread was woken or removed explicitly while slot
                         * was locked */
                        corelock_unlock(ocl);
                        remove_from_list_tmo(curr);
                        UNLOCK_THREAD(curr);
                        continue;
                    }
                }
#endif /* NUM_CORES */

                remove_from_list_l(curr->bqp, curr);

#ifdef HAVE_WAKEUP_EXT_CB
                if (curr->wakeup_ext_cb != NULL)
                    curr->wakeup_ext_cb(curr);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
                if (curr->blocker != NULL)
                    wakeup_priority_protocol_release(curr);
#endif

#if NUM_CORES > 1
                corelock_unlock(ocl);
#endif
            }
            /* else state == STATE_SLEEPING */

            remove_from_list_tmo(curr);

            RTR_LOCK(core);

            curr->state = STATE_RUNNING;

            add_to_list_l(&cores[core].running, curr);
            rtr_add_entry(core, curr->priority);

            RTR_UNLOCK(core);
        }

        UNLOCK_THREAD(curr);
    }

    cores[core].next_tmo_check = next_tmo_check;
}
/*---------------------------------------------------------------------------
 * Performs operations that must be done before blocking a thread but after
 * the state is saved.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
{
    struct thread_blk_ops *ops = &cores[core].blk_ops;
    const unsigned flags = ops->flags;

    if (LIKELY(flags == TBOP_CLEAR))
        return;

    switch (flags)
    {
    case TBOP_SWITCH_CORE:
        core_switch_blk_op(core, thread);
        /* Fall-through */
    case TBOP_UNLOCK_CORELOCK:
        corelock_unlock(ops->cl_p);
        break;
    }

    ops->flags = TBOP_CLEAR;
}
#endif /* NUM_CORES > 1 */
#ifdef RB_PROFILE
void profile_thread(void)
{
    profstart(cores[CURRENT_CORE].running - threads);
}
#endif /* RB_PROFILE */
/*---------------------------------------------------------------------------
 * Prepares a thread to block on an object's list and/or for a specified
 * duration - expects object and slot to be appropriately locked if needed
 * and interrupts to be masked.
 *---------------------------------------------------------------------------
 */
static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
{
    /* If inlined, unreachable branches will be pruned with no size penalty
       because state is passed as a constant parameter. */
    const unsigned int core = IF_COP_CORE(thread->core);

    /* Remove the thread from the list of running threads. */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, thread);
    rtr_subtract_entry(core, thread->priority);
    RTR_UNLOCK(core);

    /* Add a timeout to the block if not infinite */
    switch (state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Put the thread into a new list of inactive threads. */
        add_to_list_l(thread->bqp, thread);

        if (state == STATE_BLOCKED)
            break;

        /* Fall-through */
    case STATE_SLEEPING:
        /* If this thread times out sooner than any other thread, update
           next_tmo_check to its timeout */
        if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
        {
            cores[core].next_tmo_check = thread->tmo_tick;
        }

        if (thread->tmo.prev == NULL)
        {
            add_to_list_tmo(thread);
        }
        /* else thread was never removed from list - just keep it there */
        break;
    }

    /* Remember the next thread about to block. */
    cores[core].block_task = thread;

    /* Report new state. */
    thread->state = state;
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion for any given priority. Any thread
 * that removed itself from the running list first must specify itself in
 * the parameter.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void switch_thread(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *block = cores[core].block_task;
    struct thread_entry *thread = cores[core].running;

    /* Get context to save - next thread to run is unknown until all wakeups
     * are evaluated */
    if (block != NULL)
    {
        cores[core].block_task = NULL;

#if NUM_CORES > 1
        if (UNLIKELY(thread == block))
        {
            /* This was the last thread running and another core woke us before
             * reaching here. Force next thread selection to give tmo threads or
             * other threads woken before this block a first chance. */
            block = NULL;
        }
        else
#endif
        {
            /* Blocking task is the old one */
            thread = block;
        }
    }

#ifdef RB_PROFILE
    profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
#endif

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    store_context(&thread->context);

    /* Check if the current thread stack is overflown */
    if (UNLIKELY(thread->stack[0] != DEADBEEF))
        thread_stkov(thread);

#if NUM_CORES > 1
    /* Run any blocking operations requested before switching/sleeping */
    run_blocking_ops(core, thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
    /* Reset the value of thread's skip count */
        thread->skip_count = 0;
#endif

    for (;;)
    {
        /* If there are threads on a timeout and the earliest wakeup is due,
         * check the list and wake any threads that need to start running
         * again. */
        if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
        {
            check_tmo_threads();
        }

        disable_irq();
        RTR_LOCK(core);

        thread = cores[core].running;

        if (UNLIKELY(thread == NULL))
        {
            /* Enter sleep mode to reduce power usage - woken up on interrupt
             * or wakeup request from another core - expected to enable
             * interrupts. */
            RTR_UNLOCK(core);
            core_sleep(IF_COP(core));
        }
        else
        {
#ifdef HAVE_PRIORITY_SCHEDULING
            /* Select the new task based on priorities and the last time a
             * process got CPU time relative to the highest priority runnable
             * task. */
            struct priority_distribution *pd = &cores[core].rtr;
            int max = find_first_set_bit(pd->mask);

            if (block == NULL)
            {
                /* Not switching on a block, tentatively select next thread */
                thread = thread->l.next;
            }

            for (;;)
            {
                int priority = thread->priority;
                int diff;

                /* This ridiculously simple method of aging seems to work
                 * suspiciously well. It does tend to reward CPU hogs (under
                 * yielding) but that's generally not desirable at all. On
                 * the plus side, it, relative to other threads, penalizes
                 * excess yielding which is good if some high priority thread
                 * is performing no useful work such as polling for a device
                 * to be ready. Of course, aging is only employed when higher
                 * and lower priority threads are runnable. The highest
                 * priority runnable thread(s) are never skipped unless a
                 * lower-priority process has aged sufficiently. Priorities
                 * of REALTIME class are run strictly according to priority
                 * thus are not subject to switchout due to lower-priority
                 * processes aging; they must give up the processor by going
                 * off the run list. */
                if (LIKELY(priority <= max) ||
                    IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
                    (priority > PRIORITY_REALTIME &&
                     (diff = priority - max,
                      ++thread->skip_count > diff*diff)))
                {
                    cores[core].running = thread;
                    break;
                }

                thread = thread->l.next;
            }
#else
            /* Without priority use a simple FCFS algorithm */
            if (block == NULL)
            {
                /* Not switching on a block, select next thread */
                thread = thread->l.next;
                cores[core].running = thread;
            }
#endif /* HAVE_PRIORITY_SCHEDULING */

            RTR_UNLOCK(core);
            enable_irq();
            break;
        }
    }

    /* And finally give control to the next thread. */
    load_context(&thread->context);

#ifdef RB_PROFILE
    profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
#endif
}
/*---------------------------------------------------------------------------
 * Sleeps a thread for at least a specified number of ticks with zero being
 * a wait until the next tick.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void sleep_thread(int ticks)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    LOCK_THREAD(current);

    /* Set our timeout, remove from run list and join timeout list. */
    current->tmo_tick = current_tick + ticks + 1;
    block_thread_on_l(current, STATE_SLEEPING);

    UNLOCK_THREAD(current);
}
/*---------------------------------------------------------------------------
 * Indefinitely block a thread on a blocking queue for explicit wakeup.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread(struct thread_entry *current)
{
    /* Set the state to blocked and take us off of the run queue until we
     * are explicitly woken */
    LOCK_THREAD(current);

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}
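
/* Hedged sketch of the calling convention for block_thread(), modeled on
 * thread_wait() further down; "struct kernel_object" with a queue and a
 * corelock is a hypothetical stand-in. The caller masks interrupts, points
 * bqp at the object's queue (and obj_cl at its corelock on multicore),
 * blocks and then switches away. */
#if 0
static void wait_on_object_sketch(struct kernel_object *obj)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    IF_COP( current->obj_cl = &obj->cl; )
    current->bqp = &obj->queue;

    disable_irq();
    block_thread(current);      /* queued; woken later by wakeup_thread() */
    corelock_unlock(&obj->cl);
    switch_thread();            /* does not return until woken            */
}
#endif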
/*---------------------------------------------------------------------------
 * Block a thread on a blocking queue for a specified time interval or until
 * explicitly woken - whichever happens first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread_w_tmo(struct thread_entry *current, int timeout)
{
    /* Get the entry for the current running thread. */
    LOCK_THREAD(current);

    /* Set the state to blocked with the specified timeout */
    current->tmo_tick = current_tick + timeout;

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED_W_TMO);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}
/*---------------------------------------------------------------------------
 * Explicitly wakeup a thread on a blocking queue. Only affects threads of
 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
 *
 * This code should be considered a critical section by the caller meaning
 * that the object's corelock should be held.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread = *list;
    unsigned int result = THREAD_NONE;

    /* Check if there is a blocked thread at all. */
    if (thread == NULL)
        return result;

    LOCK_THREAD(thread);

    /* Determine thread's current state. */
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        remove_from_list_l(list, thread);

        result = THREAD_OK;

#ifdef HAVE_PRIORITY_SCHEDULING
        struct thread_entry *current;
        struct blocker *bl = thread->blocker;

        if (bl == NULL)
        {
            /* No inheritance - just boost the thread by aging */
            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
                thread->skip_count = thread->priority;
            current = cores[CURRENT_CORE].running;
        }
        else
        {
            /* Call the specified unblocking PIP */
            current = bl->wakeup_protocol(thread);
        }

        if (current != NULL &&
            find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
            < current->priority)
        {
            /* There is a thread ready to run of higher or same priority on
             * the same core as the current one; recommend a task switch.
             * Knowing if this is an interrupt call would be helpful here. */
            result |= THREAD_SWITCH;
        }
#endif /* HAVE_PRIORITY_SCHEDULING */

        core_schedule_wakeup(thread);
        break;

    /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
    default:
        THREAD_PANICF("wakeup_thread->block invalid", thread);
    case STATE_RUNNING:
    case STATE_KILLED:
        break;
#endif
    }

    UNLOCK_THREAD(thread);
    return result;
}
/*---------------------------------------------------------------------------
 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
 * from each operation or THREAD_NONE if nothing was awakened. Object owning
 * the queue must be locked first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break; /* No more threads */

        result |= rc;
    }

    return result;
}
/*---------------------------------------------------------------------------
 * Assign the thread slot a new ID. Version is 1-255.
 *---------------------------------------------------------------------------
 */
static void new_thread_id(unsigned int slot_num,
                          struct thread_entry *thread)
{
    unsigned int version =
        (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
        & THREAD_ID_VERSION_MASK;

    /* If wrapped to 0, make it 1 */
    if (version == 0)
        version = 1u << THREAD_ID_VERSION_SHIFT;

    thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
}
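
/* Sketch of how an ID round-trips, which thread_id_entry() below relies
 * on: the low bits select the slot, while a version mismatch exposes a
 * stale handle to a recycled slot. */
#if 0
static bool thread_id_is_live_sketch(unsigned int id)
{
    struct thread_entry *t = &threads[id & THREAD_ID_SLOT_MASK];
    return t->id == id; /* false -> slot was reassigned since */
}
#endif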
/*---------------------------------------------------------------------------
 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
 * will be locked on multicore.
 *---------------------------------------------------------------------------
 */
static struct thread_entry * find_empty_thread_slot(void)
{
    /* Any slot could be on an interrupt-accessible list */
    IF_COP( int oldlevel = disable_irq_save(); )
    struct thread_entry *thread = NULL;
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        /* Obtain current slot state - lock it on multicore */
        struct thread_entry *t = &threads[n];
        LOCK_THREAD(t);

        if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
        {
            /* Slot is empty - leave it locked and caller will unlock */
            thread = t;
            break;
        }

        /* Finished examining slot - no longer busy - unlock on multicore */
        UNLOCK_THREAD(t);
    }

    IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
                                        not accessible to them yet */
    return thread;
}
/*---------------------------------------------------------------------------
 * Return the thread_entry pointer for a thread_id. Return the current
 * thread if the ID is 0 (alias for current).
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
    return (thread_id == THREAD_ID_CURRENT) ?
        cores[CURRENT_CORE].running :
        &threads[thread_id & THREAD_ID_SLOT_MASK];
}
/*---------------------------------------------------------------------------
 * Place the current core in idle mode - woken up on interrupt or wake
 * request from another core.
 *---------------------------------------------------------------------------
 */
void core_idle(void)
{
    IF_COP( const unsigned int core = CURRENT_CORE; )
    disable_irq();
    core_sleep(IF_COP(core));
}
/*---------------------------------------------------------------------------
 * Create a thread. If using a dual core architecture, specify which core to
 * start the thread on.
 *
 * Return ID if context area could be allocated, else NULL.
 *---------------------------------------------------------------------------
 */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core))
{
    unsigned int i;
    unsigned int stack_words;
    uintptr_t stackptr, stackend;
    struct thread_entry *thread;
    unsigned state;
    int oldlevel;

    thread = find_empty_thread_slot();
    if (thread == NULL)
    {
        return 0;
    }

    oldlevel = disable_irq_save();

    /* Munge the stack to make it easy to spot stack overflows */
    stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
    stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
    stack_size = stackend - stackptr;
    stack_words = stack_size / sizeof (uintptr_t);

    for (i = 0; i < stack_words; i++)
    {
        ((uintptr_t *)stackptr)[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread->name = name;
    thread->stack = (uintptr_t *)stackptr;
    thread->stack_size = stack_size;
    thread->queue = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->cpu_boost = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    memset(&thread->pdist, 0, sizeof(thread->pdist));
    thread->blocker = NULL;
    thread->base_priority = priority;
    thread->priority = priority;
    thread->skip_count = priority;
    prio_add_entry(&thread->pdist, priority);
#endif

#ifdef HAVE_IO_PRIORITY
    /* Default to high (foreground) priority */
    thread->io_priority = IO_PRIORITY_IMMEDIATE;
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        commit_dcache();
    }
#endif

    /* Thread is not on any timeout list but be a bit paranoid */
    thread->tmo.prev = NULL;

    state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;

    thread->context.sp = (typeof (thread->context.sp))stackend;

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    thread->state = state;
    i = thread->id; /* Snapshot while locked */

    if (state == STATE_RUNNING)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

    return i;
}
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/*---------------------------------------------------------------------------
 * Change the boost state of a thread boosting or unboosting the CPU
 * as required.
 *---------------------------------------------------------------------------
 */
static inline void boost_thread(struct thread_entry *thread, bool boost)
{
    if ((thread->cpu_boost != 0) != boost)
    {
        thread->cpu_boost = boost;
        cpu_boost(boost);
    }
}

void trigger_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, true);
}

void cancel_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, false);
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */
/*---------------------------------------------------------------------------
 * Block the current thread until another thread terminates. A thread may
 * wait on itself to terminate which prevents it from running again and it
 * will need to be killed externally.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void thread_wait(unsigned int thread_id)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* Lock thread-as-waitable-object lock */
    corelock_lock(&thread->waiter_cl);

    /* Be sure it hasn't been killed yet */
    if (thread_id == THREAD_ID_CURRENT ||
        (thread->id == thread_id && thread->state != STATE_KILLED))
    {
        IF_COP( current->obj_cl = &thread->waiter_cl; )
        current->bqp = &thread->queue;

        disable_irq();
        block_thread(current);

        corelock_unlock(&thread->waiter_cl);

        switch_thread();
        return;
    }

    corelock_unlock(&thread->waiter_cl);
}
/*---------------------------------------------------------------------------
 * Exit the current thread. The Right Way to Do Things (TM).
 *---------------------------------------------------------------------------
 */
/* This is done to foil optimizations that may require the current stack,
 * such as optimizing subexpressions that put variables on the stack that
 * get used after switching stacks. */
#if NUM_CORES > 1
/* Called by ASM stub */
static void thread_final_exit_do(struct thread_entry *current)
#else
/* No special procedure is required before calling */
static inline void thread_final_exit(struct thread_entry *current)
#endif
{
    /* At this point, this thread isn't using resources allocated for
     * execution except the slot itself. */

    /* Signal this thread */
    thread_queue_wake(&current->queue);
    corelock_unlock(&current->waiter_cl);
    switch_thread();
    /* This should never and must never be reached - if it is, the
     * state is corrupted */
    THREAD_PANICF("thread_exit->K:*R", current);
    while (1);
}
void thread_exit(void)
{
    register struct thread_entry * current = cores[CURRENT_CORE].running;

    /* Cancel CPU boost if any */
    cancel_cpu_boost();

    disable_irq();

    corelock_lock(&current->waiter_cl);
    LOCK_THREAD(current);

#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        corelock_unlock(&current->waiter_cl);
        thread_wait(id);
        THREAD_PANICF("thread_exit->WK:*R", current);
    }
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("thread_exit", current);
#endif

    if (current->tmo.prev != NULL)
    {
        /* Cancel pending timeout list removal */
        remove_from_list_tmo(current);
    }

    /* Switch tasks and never return */
    block_thread_on_l(current, STATE_KILLED);

    /* Slot must be unusable until thread is really gone */
    UNLOCK_THREAD_AT_TASK_SWITCH(current);

    /* Update ID for this slot */
    new_thread_id(current->id, current);
    current->name = NULL;

    /* Do final cleanup and remove the thread */
    thread_final_exit(current);
}
#ifdef ALLOW_REMOVE_THREAD
/*---------------------------------------------------------------------------
 * Remove a thread from the scheduler. Not The Right Way to Do Things in
 * normal code.
 *
 * Parameter is the ID as returned from create_thread().
 *
 * Use with care on threads that are not under careful control as this may
 * leave various objects in an undefined state.
 *---------------------------------------------------------------------------
 */
void remove_thread(unsigned int thread_id)
{
#if NUM_CORES > 1
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
    struct corelock *ocl = NULL;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    struct thread_entry *current = cores[core].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    unsigned state;
    int oldlevel;

    if (thread == current)
        thread_exit(); /* Current thread - do normal exit */

    oldlevel = disable_irq_save();

    corelock_lock(&thread->waiter_cl);
    LOCK_THREAD(thread);

    state = thread->state;

    if (thread->id != thread_id || state == STATE_KILLED)
        goto thread_killed;

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(thread);
        corelock_unlock(&thread->waiter_cl);
        restore_irq(oldlevel);
        thread_wait(thread_id);
        return;
    }

    thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("remove_thread", thread);
#endif

    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur which means other
         * threads have to be guided into becoming thread waiters if they
         * attempt to remove it. */
        unsigned int new_core = thread->core;

        corelock_unlock(&thread->waiter_cl);

        UNLOCK_THREAD(thread);
        restore_irq(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = disable_irq_save();

        corelock_lock(&thread->waiter_cl);
        LOCK_THREAD(thread);

        state = thread->state;
        core = new_core;
        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
         * run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Cancel CPU boost if any */
    boost_thread(thread, false);
#endif

IF_COP( retry_state: )

    switch (state)
    {
    case STATE_RUNNING:
        RTR_LOCK(core);
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        rtr_subtract_entry(core, thread->priority);
        RTR_UNLOCK(core);
        break;
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#if NUM_CORES > 1
        if (&thread->waiter_cl != thread->obj_cl)
        {
            ocl = thread->obj_cl;

            if (UNLIKELY(corelock_try_lock(ocl) == 0))
            {
                UNLOCK_THREAD(thread);
                corelock_lock(ocl);
                LOCK_THREAD(thread);

                if (UNLIKELY(thread->state != state))
                {
                    /* Something woke the thread */
                    state = thread->state;
                    corelock_unlock(ocl);
                    goto retry_state;
                }
            }
        }
#endif /* NUM_CORES */

        remove_from_list_l(thread->bqp, thread);

#ifdef HAVE_WAKEUP_EXT_CB
        if (thread->wakeup_ext_cb != NULL)
            thread->wakeup_ext_cb(thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
        if (thread->blocker != NULL)
        {
            /* Remove thread's priority influence from its chain */
            wakeup_priority_protocol_release(thread);
        }
#endif

#if NUM_CORES > 1
        if (ocl != NULL)
            corelock_unlock(ocl);
#endif
        break;
    /* Otherwise thread is frozen and hasn't run yet */
    }

    new_thread_id(thread_id, thread);
    thread->state = STATE_KILLED;

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake(&thread->queue);

    thread->name = NULL;

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock and reenable interrupts */
    corelock_unlock(&thread->waiter_cl);
    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

#if NUM_CORES > 1
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           native core */
        switch_core(old_core);
    }
#endif
}
#endif /* ALLOW_REMOVE_THREAD */
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative base priority for the core it runs on. Any
 * needed inheritance changes also may happen.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(unsigned int thread_id, int priority)
{
    int old_base_priority = -1;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* A little safety measure */
    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
        return -1;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* Make sure it's not killed */
    if (thread_id == THREAD_ID_CURRENT ||
        (thread->id == thread_id && thread->state != STATE_KILLED))
    {
        int old_priority = thread->priority;

        old_base_priority = thread->base_priority;
        thread->base_priority = priority;

        prio_move_entry(&thread->pdist, old_base_priority, priority);
        priority = find_first_set_bit(thread->pdist.mask);

        if (old_priority == priority)
        {
            /* No priority change - do nothing */
        }
        else if (thread->state == STATE_RUNNING)
        {
            /* This thread is running - change location on the run
             * queue. No transitive inheritance needed. */
            set_running_thread_priority(thread, priority);
        }
        else
        {
            thread->priority = priority;

            if (thread->blocker != NULL)
            {
                /* Bubble new priority down the chain */
                struct blocker *bl = thread->blocker;   /* Blocker struct */
                struct thread_entry *bl_t = bl->thread; /* Blocking thread */
                struct thread_entry * const tstart = thread; /* Initial thread */
                const int highest = MIN(priority, old_priority); /* Higher of new or old */

                for (;;)
                {
                    struct thread_entry *next; /* Next thread to check */
                    int bl_pr;    /* Highest blocked thread */
                    int queue_pr; /* New highest blocked thread */
#if NUM_CORES > 1
                    /* Owner can change but thread cannot be dislodged - thread
                     * may not be the first in the queue which allows other
                     * threads ahead in the list to be given ownership during the
                     * operation. If thread is next then the waker will have to
                     * wait for us and the owner of the object will remain fixed.
                     * If we successfully grab the owner -- which at some point
                     * is guaranteed -- then the queue remains fixed until we
                     * pass by. */
                    for (;;)
                    {
                        LOCK_THREAD(bl_t);

                        /* Double-check the owner - retry if it changed */
                        if (LIKELY(bl->thread == bl_t))
                            break;

                        UNLOCK_THREAD(bl_t);
                        bl_t = bl->thread;
                    }
#endif
                    bl_pr = bl->priority;

                    if (highest > bl_pr)
                        break; /* Object priority won't change */

                    /* This will include the thread being set */
                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);

                    if (queue_pr == bl_pr)
                        break; /* Object priority not changing */

                    /* Update thread boost for this object */
                    bl->priority = queue_pr;
                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
                    bl_pr = find_first_set_bit(bl_t->pdist.mask);

                    if (bl_t->priority == bl_pr)
                        break; /* Blocking thread priority not changing */

                    if (bl_t->state == STATE_RUNNING)
                    {
                        /* Thread not blocked - we're done */
                        set_running_thread_priority(bl_t, bl_pr);
                        break;
                    }

                    bl_t->priority = bl_pr;
                    bl = bl_t->blocker; /* Blocking thread has a blocker? */

                    if (bl == NULL)
                        break; /* End of chain */

                    next = bl->thread;

                    if (UNLIKELY(next == tstart))
                        break; /* Full-circle */

                    UNLOCK_THREAD(thread);

                    thread = bl_t;
                    bl_t = next;
                } /* for (;;) */

                UNLOCK_THREAD(bl_t);
            }
        }
    }

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return old_base_priority;
}
/*---------------------------------------------------------------------------
 * Returns the current base priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int base_priority = thread->base_priority;

    /* Simply check without locking slot. It may or may not be valid by the
     * time the function returns anyway. If all tests pass, it is the
     * correct value for when it was valid. */
    if (thread_id != THREAD_ID_CURRENT &&
        (thread->id != thread_id || thread->state == STATE_KILLED))
        base_priority = -1; /* Invalid thread or the slot was reused */

    return base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */

#ifdef HAVE_IO_PRIORITY
int thread_get_io_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    return thread->io_priority;
}

void thread_set_io_priority(unsigned int thread_id, int io_priority)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    thread->io_priority = io_priority;
}
#endif /* HAVE_IO_PRIORITY */
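/* Usage sketch (hypothetical, not part of this file): demote a background
 * thread's I/O priority so that foreground file access wins arbitration.
 * The meaning of the io_priority value is defined by the storage layer;
 * IO_PRIORITY_BACKGROUND is assumed from those definitions and may differ. */
#if 0 /* example only - not compiled */
static void make_io_background(unsigned int thread_id)
{
    thread_set_io_priority(thread_id, IO_PRIORITY_BACKGROUND);
}
#endif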
/*---------------------------------------------------------------------------
 * Starts a frozen thread - similar semantics to wakeup_thread except that
 * the thread is on no scheduler or wakeup queue at all. It exists simply by
 * virtue of the slot having a state of STATE_FROZEN.
 *---------------------------------------------------------------------------
 */
void thread_thaw(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* If thread is the current one, it cannot be frozen, therefore
     * there is no need to check that. */
    if (thread->id == thread_id && thread->state == STATE_FROZEN)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);
}
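/* Usage sketch (hypothetical, not part of this file): a thread created in
 * STATE_FROZEN sits untouched in its slot until thread_thaw() places it on
 * the run queue. CREATE_THREAD_FROZEN and the create_thread() argument list
 * are assumed from the public threading API and may differ per target. */
#if 0 /* example only - not compiled */
static void worker_main(void);      /* assumed worker entry point */
static uintptr_t worker_stack[0x400 / sizeof (uintptr_t)]; /* assumed stack */

static void start_worker_when_ready(void)
{
    unsigned int id = create_thread(worker_main, worker_stack,
                                    sizeof (worker_stack),
                                    CREATE_THREAD_FROZEN, "worker"
                                    IF_PRIO(, PRIORITY_USER_INTERFACE)
                                    IF_COP(, CPU));
    /* ... finish whatever initialization the worker depends on ... */
    thread_thaw(id); /* STATE_FROZEN -> scheduled via core_schedule_wakeup */
}
#endif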
/*---------------------------------------------------------------------------
 * Return the ID of the currently executing thread.
 *---------------------------------------------------------------------------
 */
unsigned int thread_get_current(void)
{
    return cores[CURRENT_CORE].running->id;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if (core == new_core)
    {
        /* No change - just return same core */
        return core;
    }

    int oldlevel = disable_irq_save();
    LOCK_THREAD(current);

    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - deactivate and let process complete */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        restore_irq(oldlevel);
        thread_wait(id);
        /* Should never be reached */
        THREAD_PANICF("switch_core->D:*R", current);
    }

    /* Get us off the running list for the current core */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, current);
    rtr_subtract_entry(core, current->priority);
    RTR_UNLOCK(core);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned-up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;

    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the list unlock to keep the other core stuck
     * until this thread is ready. */
    RTR_LOCK(new_core);

    rtr_add_entry(new_core, current->priority);
    add_to_list_l(&cores[new_core].running, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
    cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
    cores[core].block_task = current;

    UNLOCK_THREAD(current);

    /* Alert other core to activity */
    core_wake(new_core);

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

    /* Finally return the old core to caller */
    return current->retval;
}
#endif /* NUM_CORES > 1 */
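/* Usage sketch (hypothetical, not part of this file): a thread migrates
 * itself to the coprocessor for heavy work, then returns. The COP constant
 * is assumed to name the second core, as CPU names the first. */
#if 0 /* example only - not compiled */
static void crunch_on_cop(void)
{
    unsigned int old_core = switch_core(COP); /* runs on COP from here on */
    /* ... heavy processing off the main core ... */
    switch_core(old_core); /* migrate back; blocks until the move completes */
}
#endif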
/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;

    if (core == CPU)
    {
        /* Initialize core locks and IDs in all slots */
        int n;
        for (n = 0; n < MAXTHREADS; n++)
        {
            thread = &threads[n];
            corelock_init(&thread->waiter_cl);
            corelock_init(&thread->slot_cl);
            thread->id = THREAD_ID_INIT(n);
        }
    }

    /* CPU will initialize first and then sleep */
    thread = find_empty_thread_slot();

    if (thread == NULL)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zero'ed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }

    /* Initialize initially non-zero members of core */
    cores[core].next_tmo_check = current_tick; /* Something not in the past */

    /* Initialize initially non-zero members of slot */
    UNLOCK_THREAD(thread); /* No sync worries yet */
    thread->name = main_thread_name;
    thread->state = STATE_RUNNING;
    IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
    corelock_init(&cores[core].rtr_cl);
    thread->base_priority = PRIORITY_USER_INTERFACE;
    prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
    thread->priority = PRIORITY_USER_INTERFACE;
    rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif

    add_to_list_l(&cores[core].running, thread);

    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1  /* This code path will not be run on single core targets */
        /* Wait for other processors to finish their inits since create_thread
         * isn't safe to call until the kernel inits are done. The first
         * threads created in the system must of course be created by CPU.
         * Another possible approach is to initialize all cores and slots
         * for each core by CPU, let the remainder proceed in parallel and
         * signal CPU when all are finished. */
        core_thread_init(CPU);
    }
    else
    {
        /* Initial stack is the idle stack */
        thread->stack = idle_stacks[core];
        thread->stack_size = IDLE_STACK_SIZE;
        /* After last processor completes, it should signal all others to
         * proceed or may signal the next and call thread_exit(). The last one
         * to finish will signal CPU. */
        core_thread_init(core);
        /* Other cores do not have a main thread - go idle inside switch_thread
         * until a thread can run on the core. */
        thread_exit();
#endif /* NUM_CORES */
    }
}
/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES > 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    unsigned int i;
    int usage = 0;

    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
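/* The scan above relies on each stack having been pre-filled ("painted")
 * with the DEADBEEF sentinel when its thread was set up: since the stack
 * grows downward through the buffer, the first word that no longer holds
 * the sentinel marks the deepest extent the stack ever reached. A minimal
 * sketch of the painting side (the real equivalent happens at thread
 * creation; paint_stack is a hypothetical name): */
#if 0 /* example only - not compiled */
static void paint_stack(uintptr_t *stackptr, size_t stack_size)
{
    unsigned int i;
    for (i = 0; i < stack_size / sizeof (uintptr_t); i++)
        stackptr[i] = DEADBEEF; /* sentinel overwritten as the stack is used */
}
#endif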
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 * overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    return stack_usage(thread->stack, thread->stack_size);
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif /* NUM_CORES > 1 */
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";

        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX"; /* format the slot pointer as an ID instead */
        }

        snprintf(buffer, size, fmt, name);
    }
}
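/* Usage sketch (hypothetical, not part of this file): format a thread's
 * name - or its slot pointer when unnamed - into a local buffer for debug
 * output. DEBUGF is assumed from the firmware's debug macros. */
#if 0 /* example only - not compiled */
static void dump_thread_name(struct thread_entry *thread)
{
    char namebuf[32];
    thread_get_name(namebuf, sizeof (namebuf), thread);
    DEBUGF("thread: %s\n", namebuf);
}
#endif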