/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "gcc_extensions.h"

/****************************************************************************
 * See notes below on implementing processor-specific portions!            *
 ***************************************************************************/

/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif

/**
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever waiting for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 */
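
/* Illustrative sketch (not part of the scheduler): a kernel object waking a
 * waiter observes the order above - interrupts first, then the object's
 * corelock; wakeup_thread() takes the thread slot and rtr list locks itself.
 * "q" is a hypothetical object with a corelock "cl" and a waiter list
 * "queue":
 *
 *     int oldlevel = disable_irq_save();           // 1) IRQ
 *     corelock_lock(&q->cl);                       // 2) kernel object
 *     unsigned int rc = wakeup_thread(&q->queue);  // 3) + 4) done inside
 *     corelock_unlock(&q->cl);
 *     restore_irq(oldlevel);
 */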

/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others,
 * so usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
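
/* A minimal sketch of the procedure above for a hypothetical single-core
 * ARM target, assuming a classic ARMv5 wait-for-interrupt coprocessor
 * write; real implementations live in asm/thread.c and the target tree
 * and may need more careful instruction ordering:
 *
 *     static inline void core_sleep(void)
 *     {
 *         // 3) sleep the core until an interrupt is pending
 *         asm volatile ("mcr p15, 0, %0, c7, c0, 4" : : "r" (0));
 *         enable_irq(); // 4) reenable interrupts on wakeup
 *     }
 */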

/* Cast to the machine pointer size, whose size could be < 4 bytes or
 * > 32 bits. */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
static struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];
#else
extern uintptr_t *stackbegin;
extern uintptr_t *stackend;
#endif

static inline void core_sleep(IF_COP_VOID(unsigned int core))
    __attribute__((always_inline));

void check_tmo_threads(void)
    __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
    __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
    __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
    __attribute__((noinline));

#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
    __attribute__((always_inline));
#endif

static void thread_stkov(struct thread_entry *thread)
    __attribute__((noinline));

static inline void store_context(void* addr)
    __attribute__((always_inline));

static inline void load_context(const void* addr)
    __attribute__((always_inline));

static void thread_final_exit_do(struct thread_entry *current)
    __attribute__((noinline)) NORETURN_ATTR USED_ATTR;

static inline void thread_final_exit(struct thread_entry *current)
    __attribute__((always_inline)) NORETURN_ATTR;

void switch_thread(void)
    __attribute__((noinline));

/****************************************************************************
 * Processor/OS-specific section - include necessary core support
 */

#include "asm/thread.c"

#if defined (CPU_PP)
#include "thread-pp.c"
#endif /* CPU_PP */

#ifndef IF_NO_SKIP_YIELD
#define IF_NO_SKIP_YIELD(...)
#endif

/*
 * End Processor-specific section
 ***************************************************************************/

#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}

static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}

#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */

#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else /* NUM_CORES == 1 */
#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ true })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })
#endif /* NUM_CORES */

#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })

#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))

#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))

#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif /* HAVE_PRIORITY_SCHEDULING */

/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *    +->+---+->+---+->+---+->+---+--+
 *    |                              |
 *    +------------------------------+
 *---------------------------------------------------------------------------
 */

/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}

/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == next)
    {
        /* The only item */
        *list = NULL;
        return;
    }

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    next->l.prev = prev;
    prev->l.next = next;
}

/*---------------------------------------------------------------------------
 * Timeout list structure - circular reverse (to make "remove item" O(1)),
 * NULL-terminated forward (to ease the far more common forward traversal):
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *       +---+->+---+->+---+->+---+-X
 *---------------------------------------------------------------------------
 */

/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
    THREAD_ASSERT(thread->tmo.prev == NULL,
                  "add_to_list_tmo->already listed", thread);

    thread->tmo.next = NULL;

    if (tmo == NULL)
    {
        /* Insert into unoccupied list */
        thread->tmo.prev = thread;
        cores[IF_COP_CORE(thread->core)].timeout = thread;
        return;
    }

    /* Insert last */
    thread->tmo.prev = tmo->tmo.prev;
    tmo->tmo.prev->tmo.next = thread;
    tmo->tmo.prev = thread;
}

/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is no longer active.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
    struct thread_entry *prev = thread->tmo.prev;
    struct thread_entry *next = thread->tmo.next;

    THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);

    if (next != NULL)
        next->tmo.prev = prev;

    if (thread == *list)
    {
        /* List becomes next item and empty if next == NULL */
        *list = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
    else
    {
        if (next == NULL)
            (*list)->tmo.prev = prev;
        prev->tmo.next = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
}

#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Priority distribution structure (one category for each possible priority):
 *
 *       +----+----+----+ ... +-----+
 * hist: | F0 | F1 | F2 |     | F31 |
 *       +----+----+----+ ... +-----+
 * mask: | b0 | b1 | b2 |     | b31 |
 *       +----+----+----+ ... +-----+
 *
 * F = count of threads at priority category n (frequency)
 * b = bitmask of non-zero priority categories (occupancy)
 *
 *        / if H[n] != 0 : 1
 * b[n] = |
 *        \ else         : 0
 *
 *---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
 *
 * Mn = mutex n, Tn = thread n
 *
 * A lower priority thread inherits the priority of the highest priority
 * thread blocked waiting for it to complete an action (such as release a
 * mutex or respond to a message via queue_send):
 *
 * 1) T2->M1->T1
 *
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1, then T1 inherits the priority of T2.
 *
 * 2) T3
 *    \/
 *    T2->M1->T1
 *
 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
 * T1 inherits the higher priority of T2 and T3.
 *
 * 3) T3->M2->T2->M1->T1
 *
 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
 * then T1 inherits the priority of T3 through T2.
 *
 * Blocking chains can grow arbitrarily complex (though it's best that they
 * not form at all very often :) and build-up from these units.
 *---------------------------------------------------------------------------
 */
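
/* Worked example (illustrative): after prio_add_entry(&pd, 5) for one
 * thread and prio_add_entry(&pd, 9) for two threads,
 * pd.hist[5] == 1, pd.hist[9] == 2 and pd.mask == (1<<5)|(1<<9).
 * find_first_set_bit(pd.mask) then yields 5 - the numerically lowest,
 * i.e. highest, priority with at least one thread - which is how the
 * scheduler and the PIP code below pick the effective priority. */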

/*---------------------------------------------------------------------------
 * Increment frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_add_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;
    /* Enough size/instruction count difference for ARM makes it worth it to
     * use different code (192 bytes for ARM). Only thing better is ASM. */
#ifdef CPU_ARM
    count = pd->hist[priority];
    if (++count == 1)
        pd->mask |= 1 << priority;
    pd->hist[priority] = count;
#else /* This one's better for Coldfire */
    if ((count = ++pd->hist[priority]) == 1)
        pd->mask |= 1 << priority;
#endif

    return count;
}

/*---------------------------------------------------------------------------
 * Decrement frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_subtract_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;

#ifdef CPU_ARM
    count = pd->hist[priority];
    if (--count == 0)
        pd->mask &= ~(1 << priority);
    pd->hist[priority] = count;
#else
    if ((count = --pd->hist[priority]) == 0)
        pd->mask &= ~(1 << priority);
#endif

    return count;
}

/*---------------------------------------------------------------------------
 * Remove from one category and add to another
 *---------------------------------------------------------------------------
 */
static inline void prio_move_entry(
    struct priority_distribution *pd, int from, int to)
{
    uint32_t mask = pd->mask;

#ifdef CPU_ARM
    unsigned int count;

    count = pd->hist[from];
    if (--count == 0)
        mask &= ~(1 << from);
    pd->hist[from] = count;

    count = pd->hist[to];
    if (++count == 1)
        mask |= 1 << to;
    pd->hist[to] = count;
#else
    if (--pd->hist[from] == 0)
        mask &= ~(1 << from);

    if (++pd->hist[to] == 1)
        mask |= 1 << to;
#endif

    pd->mask = mask;
}

/*---------------------------------------------------------------------------
 * Change the priority and rtr entry for a running thread
 *---------------------------------------------------------------------------
 */
static inline void set_running_thread_priority(
    struct thread_entry *thread, int priority)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    RTR_LOCK(core);
    rtr_move_entry(core, thread->priority, priority);
    thread->priority = priority;
    RTR_UNLOCK(core);
}

/*---------------------------------------------------------------------------
 * Finds the highest priority thread in a list of threads. If the list is
 * empty, PRIORITY_IDLE is returned.
 *
 * It is possible to use the struct priority_distribution within an object
 * instead of scanning the remaining threads in the list but as a compromise,
 * the resulting per-object memory overhead is saved at a slight speed
 * penalty under high contention.
 *---------------------------------------------------------------------------
 */
static int find_highest_priority_in_list_l(
    struct thread_entry * const thread)
{
    if (LIKELY(thread != NULL))
    {
        /* Go through the list until ending up back at the initial thread */
        int highest_priority = thread->priority;
        struct thread_entry *curr = thread;

        do
        {
            int priority = curr->priority;

            if (priority < highest_priority)
                highest_priority = priority;

            curr = curr->l.next;
        }
        while (curr != thread);

        return highest_priority;
    }

    return PRIORITY_IDLE;
}

/*---------------------------------------------------------------------------
 * Register priority with blocking system and bubble it down the chain if
 * any until we reach the end or something is already equal or higher.
 *
 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
 * targets but that same action also guarantees a circular block anyway and
 * those are prevented, right? :-)
 *---------------------------------------------------------------------------
 */
static struct thread_entry *
    blocker_inherit_priority(struct thread_entry *current)
{
    const int priority = current->priority;
    struct blocker *bl = current->blocker;
    struct thread_entry * const tstart = current;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since the object protection is held */
    LOCK_THREAD(bl_t);

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority >= bl_pr)
            break; /* Object priority already high enough */

        bl->priority = priority;

        /* Add this one */
        prio_add_entry(&bl_t->pdist, priority);

        if (bl_pr < PRIORITY_IDLE)
        {
            /* Not first waiter - subtract old one */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
        }

        if (priority >= bl_t->priority)
            break; /* Thread priority high enough */

        if (bl_t->state == STATE_RUNNING)
        {
            /* Blocking thread is a running thread therefore there are no
             * further blockers. Change the "run queue" on which it
             * resides. */
            set_running_thread_priority(bl_t, priority);
            break;
        }

        bl_t->priority = priority;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(current);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        current = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    return current;
}

/*---------------------------------------------------------------------------
 * Readjust priorities when waking a thread blocked waiting for another
 * in essence "releasing" the thread's effect on the object owner. Can be
 * performed from any context.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread)
{
    const int priority = thread->priority;
    struct blocker *bl = thread->blocker;
    struct thread_entry * const tstart = thread;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since object will be locked */
    LOCK_THREAD(bl_t);

    thread->blocker = NULL; /* Thread not blocked */

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority > bl_pr)
            break; /* Object priority higher */

        next = *thread->bqp;

        if (next == NULL)
        {
            /* No more threads in queue */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
            bl->priority = PRIORITY_IDLE;
        }
        else
        {
            /* Check list for highest remaining priority */
            int queue_pr = find_highest_priority_in_list_l(next);

            if (queue_pr == bl_pr)
                break; /* Object priority not changing */

            /* Change queue priority */
            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
            bl->priority = queue_pr;
        }

        if (bl_pr > bl_t->priority)
            break; /* thread priority is higher */

        bl_pr = find_first_set_bit(bl_t->pdist.mask);

        if (bl_pr == bl_t->priority)
            break; /* Thread priority not changing */

        if (bl_t->state == STATE_RUNNING)
        {
            /* No further blockers */
            set_running_thread_priority(bl_t, bl_pr);
            break;
        }

        bl_t->priority = bl_pr;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(thread);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        thread = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

#if NUM_CORES > 1
    if (UNLIKELY(thread != tstart))
    {
        /* Relock original if it changed */
        LOCK_THREAD(tstart);
    }
#endif

    return cores[CURRENT_CORE].running;
}

/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
 * blocking chains may only unblock from the very end.
 *
 * Only the owning thread itself may call this and so the assumption that
 * it is the running thread is made.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner */
    struct blocker *bl = thread->blocker;
    struct thread_entry *bl_t = bl->thread;
    struct thread_entry *next;
    int bl_pr;

    THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
                  "UPPT->wrong thread", cores[CURRENT_CORE].running);

    LOCK_THREAD(bl_t);

    bl_pr = bl->priority;

    /* Remove the object's boost from the owning thread */
    if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
        bl_pr <= bl_t->priority)
    {
        /* No more threads at this priority are waiting and the old level is
         * at least the thread level */
        int priority = find_first_set_bit(bl_t->pdist.mask);

        if (priority != bl_t->priority)
        {
            /* Adjust this thread's priority */
            set_running_thread_priority(bl_t, priority);
        }
    }

    next = *thread->bqp;

    if (LIKELY(next == NULL))
    {
        /* Expected shortcut - no more waiters */
        bl_pr = PRIORITY_IDLE;
    }
    else
    {
        if (thread->priority <= bl_pr)
        {
            /* Need to scan threads remaining in queue */
            bl_pr = find_highest_priority_in_list_l(next);
        }

        if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
            bl_pr < thread->priority)
        {
            /* Thread priority must be raised */
            thread->priority = bl_pr;
        }
    }

    bl->thread = thread;    /* This thread pwns */
    bl->priority = bl_pr;   /* Save highest blocked priority */
    thread->blocker = NULL; /* Thread not blocked */

    UNLOCK_THREAD(bl_t);

    return bl_t;
}
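
/* Sketch of intended use (illustrative; field names are those used
 * elsewhere in this file): an ownership-transferring object such as a
 * mutex points its blocker's wakeup_protocol at the transfer routine so
 * that waking the next waiter also hands over the priority boost
 * accumulated on the old owner:
 *
 *     struct blocker bl;
 *     bl.thread = owner_thread;
 *     bl.priority = PRIORITY_IDLE;
 *     bl.wakeup_protocol = wakeup_priority_protocol_transfer;
 *
 * wakeup_thread() then invokes bl.wakeup_protocol(thread) when the
 * blocked thread is woken (see the PIP dispatch in wakeup_thread below). */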

/*---------------------------------------------------------------------------
 * No threads must be blocked waiting for this thread except for it to exit.
 * The alternative is more elaborate cleanup and object registration code.
 * Check this for risk of silent data corruption when objects with
 * inheritable blocking are abandoned by the owner - not precise but may
 * catch something.
 *---------------------------------------------------------------------------
 */
static void __attribute__((noinline)) check_for_obj_waiters(
    const char *function, struct thread_entry *thread)
{
    /* Only one bit in the mask should be set with a frequency of 1 which
     * represents the thread's own base priority */
    uint32_t mask = thread->pdist.mask;
    if ((mask & (mask - 1)) != 0 ||
        thread->pdist.hist[find_first_set_bit(mask)] > 1)
    {
        unsigned char name[32];
        thread_get_name(name, 32, thread);
        panicf("%s->%s with obj. waiters", function, name);
    }
}
#endif /* HAVE_PRIORITY_SCHEDULING */

/*---------------------------------------------------------------------------
 * Move a thread back to a running state on its core.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);

    RTR_LOCK(core);

    thread->state = STATE_RUNNING;

    add_to_list_l(&cores[core].running, thread);
    rtr_add_entry(core, thread->priority);

    RTR_UNLOCK(core);

#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}

/*---------------------------------------------------------------------------
 * Check the core's timeout list when at least one thread is due to wake.
 * Filtering for the condition is done before making the call. Resets the
 * tick when the next check will occur.
 *---------------------------------------------------------------------------
 */
void check_tmo_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *next = cores[core].timeout;

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */

    /* Break the loop once we have walked through the list of all
     * sleeping processes or have removed them all. */
    while (next != NULL)
    {
        /* Check sleeping threads. Allow interrupts between checks. */
        enable_irq();

        struct thread_entry *curr = next;

        next = curr->tmo.next;

        /* Lock thread slot against explicit wakeup */
        disable_irq();
        LOCK_THREAD(curr);

        unsigned state = curr->state;

        if (state < TIMEOUT_STATE_FIRST)
        {
            /* Cleanup threads no longer on a timeout but still on the
             * list. */
            remove_from_list_tmo(curr);
        }
        else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
        {
            /* Timeout still pending - this will be the usual case */
            if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
            {
                /* Earliest timeout found so far - move the next check up
                   to its time */
                next_tmo_check = curr->tmo_tick;
            }
        }
        else
        {
            /* Sleep timeout has been reached so bring the thread back to
             * life again. */
            if (state == STATE_BLOCKED_W_TMO)
            {
#ifdef HAVE_CORELOCK_OBJECT
                /* Lock the waiting thread's kernel object */
                struct corelock *ocl = curr->obj_cl;

                if (UNLIKELY(corelock_try_lock(ocl) == 0))
                {
                    /* Need to retry in the correct order though the need is
                     * unlikely */
                    UNLOCK_THREAD(curr);
                    corelock_lock(ocl);
                    LOCK_THREAD(curr);

                    if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
                    {
                        /* Thread was woken or removed explicitly while slot
                         * was unlocked */
                        corelock_unlock(ocl);
                        remove_from_list_tmo(curr);
                        UNLOCK_THREAD(curr);
                        continue;
                    }
                }
#endif /* NUM_CORES */

                remove_from_list_l(curr->bqp, curr);

#ifdef HAVE_WAKEUP_EXT_CB
                if (curr->wakeup_ext_cb != NULL)
                    curr->wakeup_ext_cb(curr);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
                if (curr->blocker != NULL)
                    wakeup_priority_protocol_release(curr);
#endif
                corelock_unlock(ocl);
            }
            /* else state == STATE_SLEEPING */

            remove_from_list_tmo(curr);

            RTR_LOCK(core);

            curr->state = STATE_RUNNING;

            add_to_list_l(&cores[core].running, curr);
            rtr_add_entry(core, curr->priority);

            RTR_UNLOCK(core);
        }

        UNLOCK_THREAD(curr);
    }

    cores[core].next_tmo_check = next_tmo_check;
}

/*---------------------------------------------------------------------------
 * Performs operations that must be done before blocking a thread but after
 * the state is saved.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
{
    struct thread_blk_ops *ops = &cores[core].blk_ops;
    const unsigned flags = ops->flags;

    if (LIKELY(flags == TBOP_CLEAR))
        return;

    switch (flags)
    {
    case TBOP_SWITCH_CORE:
        core_switch_blk_op(core, thread);
        /* Fall-through */
    case TBOP_UNLOCK_CORELOCK:
        corelock_unlock(ops->cl_p);
        break;
    }

    ops->flags = TBOP_CLEAR;
}
#endif /* NUM_CORES > 1 */

#ifdef RB_PROFILE
void profile_thread(void)
{
    profstart(cores[CURRENT_CORE].running - threads);
}
#endif

/*---------------------------------------------------------------------------
 * Prepares a thread to block on an object's list and/or for a specified
 * duration - expects object and slot to be appropriately locked if needed
 * and interrupts to be masked.
 *---------------------------------------------------------------------------
 */
static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
{
    /* If inlined, unreachable branches will be pruned with no size penalty
       because state is passed as a constant parameter. */
    const unsigned int core = IF_COP_CORE(thread->core);

    /* Remove the thread from the list of running threads. */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, thread);
    rtr_subtract_entry(core, thread->priority);
    RTR_UNLOCK(core);

    /* Add a timeout to the block if not infinite */
    switch (state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Put the thread into a new list of inactive threads. */
        add_to_list_l(thread->bqp, thread);

        if (state == STATE_BLOCKED)
            break;

        /* Fall-through */
    case STATE_SLEEPING:
        /* If this thread times out sooner than any other thread, update
           next_tmo_check to its timeout */
        if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
        {
            cores[core].next_tmo_check = thread->tmo_tick;
        }

        if (thread->tmo.prev == NULL)
        {
            add_to_list_tmo(thread);
        }
        /* else thread was never removed from list - just keep it there */
        break;
    }

    /* Remember the next thread about to block. */
    cores[core].block_task = thread;

    /* Report new state. */
    thread->state = state;
}

/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion for any given priority. Any thread
 * that removed itself from the running list first must specify itself in
 * the parameter.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void switch_thread(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *block = cores[core].block_task;
    struct thread_entry *thread = cores[core].running;

    /* Get context to save - next thread to run is unknown until all wakeups
     * are evaluated */
    if (block != NULL)
    {
        cores[core].block_task = NULL;

#if NUM_CORES > 1
        if (UNLIKELY(thread == block))
        {
            /* This was the last thread running and another core woke us before
             * reaching here. Force next thread selection to give tmo threads or
             * other threads woken before this block a first chance. */
            block = NULL;
        }
        else
#endif
        {
            /* Blocking task is the old one */
            thread = block;
        }
    }

#ifdef RB_PROFILE
#ifdef CPU_COLDFIRE
    _profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
#else
    profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
#endif
#endif

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    store_context(&thread->context);

    /* Check if the current thread stack is overflown */
    if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
        thread_stkov(thread);

#if NUM_CORES > 1
    /* Run any blocking operations requested before switching/sleeping */
    run_blocking_ops(core, thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
    /* Reset the value of thread's skip count */
        thread->skip_count = 0;
#endif

    for (;;)
    {
        /* If there are threads on a timeout and the earliest wakeup is due,
         * check the list and wake any threads that need to start running
         * again. */
        if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
        {
            check_tmo_threads();
        }

        disable_irq();
        RTR_LOCK(core);

        thread = cores[core].running;

        if (UNLIKELY(thread == NULL))
        {
            /* Enter sleep mode to reduce power usage - woken up on interrupt
             * or wakeup request from another core - expected to enable
             * interrupts. */
            RTR_UNLOCK(core);
            core_sleep(IF_COP(core));
        }
        else
        {
#ifdef HAVE_PRIORITY_SCHEDULING
            /* Select the new task based on priorities and the last time a
             * process got CPU time relative to the highest priority runnable
             * task. */
            struct priority_distribution *pd = &cores[core].rtr;
            int max = find_first_set_bit(pd->mask);

            if (block == NULL)
            {
                /* Not switching on a block, tentatively select next thread */
                thread = thread->l.next;
            }

            for (;;)
            {
                int priority = thread->priority;
                int diff;

                /* This ridiculously simple method of aging seems to work
                 * suspiciously well. It does tend to reward CPU hogs (under
                 * yielding) but that's generally not desirable at all. On
                 * the plus side, it, relative to other threads, penalizes
                 * excess yielding which is good if some high priority thread
                 * is performing no useful work such as polling for a device
                 * to be ready. Of course, aging is only employed when higher
                 * and lower priority threads are runnable. The highest
                 * priority runnable thread(s) are never skipped unless a
                 * lower-priority process has aged sufficiently. Priorities
                 * of REALTIME class are run strictly according to priority
                 * thus are not subject to switchout due to lower-priority
                 * processes aging; they must give up the processor by going
                 * off the run list. */
                if (LIKELY(priority <= max) ||
                    IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
                    (priority > PRIORITY_REALTIME &&
                     (diff = priority - max,
                      ++thread->skip_count > diff*diff)))
                {
                    cores[core].running = thread;
                    break;
                }

                thread = thread->l.next;
            }
#else
            /* Without priority use a simple FCFS algorithm */
            if (block == NULL)
            {
                /* Not switching on a block, select next thread */
                thread = thread->l.next;
                cores[core].running = thread;
            }
#endif /* HAVE_PRIORITY_SCHEDULING */

            RTR_UNLOCK(core);
            enable_irq();
            break;
        }
    }

    /* And finally give control to the next thread. */
    load_context(&thread->context);

#ifdef RB_PROFILE
    profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
#endif
}
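
/* Worked example of the aging rule above (illustrative): with the highest
 * runnable priority max = 16, a runnable thread at priority 20 has
 * diff = 20 - 16 = 4 and is skipped until its skip_count exceeds
 * diff*diff = 16, i.e. it runs roughly once per 16 times it is passed
 * over; a thread at max is always eligible since priority <= max. */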

/*---------------------------------------------------------------------------
 * Sleeps a thread for at least a specified number of ticks with zero being
 * a wait until the next tick.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void sleep_thread(int ticks)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    LOCK_THREAD(current);

    /* Set our timeout, remove from run list and join timeout list. */
    current->tmo_tick = current_tick + ticks + 1;

    block_thread_on_l(current, STATE_SLEEPING);

    UNLOCK_THREAD(current);
}
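
/* Example of the tick arithmetic (illustrative): with HZ = 100,
 * sleep_thread(HZ/10) stores tmo_tick = current_tick + 10 + 1, so the
 * thread wakes after at least 10 full ticks have elapsed - the "+ 1"
 * guards against waking up to one tick early when the call lands just
 * before a tick boundary. */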
1284 /*---------------------------------------------------------------------------
1285 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1287 * INTERNAL: Intended for use by kernel objects and not for programs.
1288 *---------------------------------------------------------------------------
1290 void block_thread(struct thread_entry
*current
)
1292 /* Set the state to blocked and take us off of the run queue until we
1293 * are explicitly woken */
1294 LOCK_THREAD(current
);
1296 /* Set the list for explicit wakeup */
1297 block_thread_on_l(current
, STATE_BLOCKED
);
1299 #ifdef HAVE_PRIORITY_SCHEDULING
1300 if (current
->blocker
!= NULL
)
1302 /* Object supports PIP */
1303 current
= blocker_inherit_priority(current
);
1307 UNLOCK_THREAD(current
);
/*---------------------------------------------------------------------------
 * Block a thread on a blocking queue for a specified time interval or until
 * explicitly woken - whichever happens first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread_w_tmo(struct thread_entry *current, int timeout)
{
    /* Get the entry for the current running thread. */
    LOCK_THREAD(current);

    /* Set the state to blocked with the specified timeout */
    current->tmo_tick = current_tick + timeout;

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED_W_TMO);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Explicitly wakeup a thread on a blocking queue. Only affects threads of
 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
 *
 * This code should be considered a critical section by the caller meaning
 * that the object's corelock should be held.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread = *list;
    unsigned int result = THREAD_NONE;

    /* Check if there is a blocked thread at all. */
    if (thread == NULL)
        return result;

    LOCK_THREAD(thread);

    /* Determine thread's current state. */
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        remove_from_list_l(list, thread);

        result = THREAD_OK;

#ifdef HAVE_PRIORITY_SCHEDULING
        struct thread_entry *current;
        struct blocker *bl = thread->blocker;

        if (bl == NULL)
        {
            /* No inheritance - just boost the thread by aging */
            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
                thread->skip_count = thread->priority;
            current = cores[CURRENT_CORE].running;
        }
        else
        {
            /* Call the specified unblocking PIP */
            current = bl->wakeup_protocol(thread);
        }

        if (current != NULL &&
            find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
            < current->priority)
        {
            /* There is a thread ready to run of higher or same priority on
             * the same core as the current one; recommend a task switch.
             * Knowing if this is an interrupt call would be helpful here. */
            result |= THREAD_SWITCH;
        }
#endif /* HAVE_PRIORITY_SCHEDULING */

        core_schedule_wakeup(thread);
        break;

    /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
    default:
        THREAD_PANICF("wakeup_thread->block invalid", thread);
    case STATE_RUNNING:
    case STATE_KILLED:
        break;
#endif
    }

    UNLOCK_THREAD(thread);
    return result;
}
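
/* Typical caller pattern (illustrative; "q" is a hypothetical object with
 * corelock "cl" and waiter list "queue"):
 *
 *     corelock_lock(&q->cl);
 *     unsigned int result = wakeup_thread(&q->queue);
 *     corelock_unlock(&q->cl);
 *     if (result & THREAD_SWITCH)
 *         switch_thread(); // honor the recommendation when not in an ISR
 */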

/*---------------------------------------------------------------------------
 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
 * from each operation or THREAD_NONE if nothing was awakened. Object owning
 * the queue must be locked first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break; /* No more threads */

        result |= rc;
    }

    return result;
}

/*---------------------------------------------------------------------------
 * Assign the thread slot a new ID. Version is 1-255.
 *---------------------------------------------------------------------------
 */
static void new_thread_id(unsigned int slot_num,
                          struct thread_entry *thread)
{
    unsigned int version =
        (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
                & THREAD_ID_VERSION_MASK;

    /* If wrapped to 0, make it 1 */
    if (version == 0)
        version = 1u << THREAD_ID_VERSION_SHIFT;

    thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
}
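
/* Layout sketch (illustrative): a thread ID packs the slot index in the
 * low bits (THREAD_ID_SLOT_MASK) and a 1-255 reuse version above it
 * (THREAD_ID_VERSION_MASK). thread_id_entry() recovers the slot with
 * id & THREAD_ID_SLOT_MASK; callers then compare thread->id against the
 * full ID so a stale handle to a recycled slot no longer matches. */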

/*---------------------------------------------------------------------------
 * Find an empty thread slot or NULL if none found. The slot returned
 * will be locked on multicore.
 *---------------------------------------------------------------------------
 */
static struct thread_entry * find_empty_thread_slot(void)
{
    /* Any slot could be on an interrupt-accessible list */
    IF_COP( int oldlevel = disable_irq_save(); )
    struct thread_entry *thread = NULL;
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        /* Obtain current slot state - lock it on multicore */
        struct thread_entry *t = &threads[n];
        LOCK_THREAD(t);

        if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
        {
            /* Slot is empty - leave it locked and caller will unlock */
            thread = t;
            break;
        }

        /* Finished examining slot - no longer busy - unlock on multicore */
        UNLOCK_THREAD(t);
    }

    IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
                                        not accessible to them yet */
    return thread;
}

/*---------------------------------------------------------------------------
 * Return the thread_entry pointer for a thread_id. Return the current
 * thread if the ID is (unsigned int)-1 (alias for current).
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
    return &threads[thread_id & THREAD_ID_SLOT_MASK];
}

/*---------------------------------------------------------------------------
 * Return the thread id of the calling thread
 * --------------------------------------------------------------------------
 */
unsigned int thread_self(void)
{
    return cores[CURRENT_CORE].running->id;
}

/*---------------------------------------------------------------------------
 * Return the thread entry of the calling thread.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_self_entry(void)
{
    return cores[CURRENT_CORE].running;
}

/*---------------------------------------------------------------------------
 * Place the current core in idle mode - woken up on interrupt or wake
 * request from another core.
 *---------------------------------------------------------------------------
 */
void core_idle(void)
{
    IF_COP( const unsigned int core = CURRENT_CORE; )
    disable_irq();
    core_sleep(IF_COP(core));
}

/*---------------------------------------------------------------------------
 * Create a thread. If using a dual core architecture, specify which core to
 * start the thread on.
 *
 * Return ID if context area could be allocated, else 0.
 *---------------------------------------------------------------------------
 */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core))
{
    unsigned int i;
    unsigned int stack_words;
    uintptr_t stackptr, stackend;
    struct thread_entry *thread;
    unsigned state;
    int oldlevel;

    thread = find_empty_thread_slot();
    if (thread == NULL)
    {
        return 0;
    }

    oldlevel = disable_irq_save();

    /* Munge the stack to make it easy to spot stack overflows */
    stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
    stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
    stack_size = stackend - stackptr;
    stack_words = stack_size / sizeof (uintptr_t);

    for (i = 0; i < stack_words; i++)
    {
        ((uintptr_t *)stackptr)[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread->name = name;
    thread->stack = (uintptr_t *)stackptr;
    thread->stack_size = stack_size;
    thread->queue = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->cpu_boost = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    memset(&thread->pdist, 0, sizeof(thread->pdist));
    thread->blocker = NULL;
    thread->base_priority = priority;
    thread->priority = priority;
    thread->skip_count = priority;
    prio_add_entry(&thread->pdist, priority);
#endif

#ifdef HAVE_IO_PRIORITY
    /* Default to high (foreground) priority */
    thread->io_priority = IO_PRIORITY_IMMEDIATE;
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        commit_discard_idcache();
    }
#endif

    /* Thread is not on any timeout list but be a bit paranoid */
    thread->tmo.prev = NULL;

    state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;

    thread->context.sp = (typeof (thread->context.sp))stackend;

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    thread->state = state;
    i = thread->id; /* Snapshot while locked */

    if (state == STATE_RUNNING)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

    return i;
}
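
/* Usage sketch (illustrative; the stack object and worker function are
 * assumptions, the priority/core macros are the real ones):
 *
 *     static long worker_stack[DEFAULT_STACK_SIZE/sizeof(long)];
 *
 *     static void worker(void)
 *     {
 *         for (;;)
 *             sleep(HZ); // do periodic work
 *     }
 *
 *     unsigned int id = create_thread(worker, worker_stack,
 *                                     sizeof(worker_stack), 0, "worker"
 *                                     IF_PRIO(, PRIORITY_BACKGROUND)
 *                                     IF_COP(, CPU));
 *     if (id == 0)
 *         panicf("worker thread creation failed");
 */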

#ifdef HAVE_SCHEDULER_BOOSTCTRL
/*---------------------------------------------------------------------------
 * Change the boost state of a thread boosting or unboosting the CPU
 * as required.
 *---------------------------------------------------------------------------
 */
static inline void boost_thread(struct thread_entry *thread, bool boost)
{
    if ((thread->cpu_boost != 0) != boost)
    {
        thread->cpu_boost = boost;
        cpu_boost(boost);
    }
}

void trigger_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, true);
}

void cancel_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, false);
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */

/*---------------------------------------------------------------------------
 * Block the current thread until another thread terminates. A thread may
 * wait on itself to terminate which prevents it from running again and it
 * will need to be killed externally.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void thread_wait(unsigned int thread_id)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* Lock thread-as-waitable-object lock */
    corelock_lock(&thread->waiter_cl);

    /* Be sure it hasn't been killed yet */
    if (thread->id == thread_id && thread->state != STATE_KILLED)
    {
        IF_COP( current->obj_cl = &thread->waiter_cl; )
        current->bqp = &thread->queue;

        disable_irq();
        block_thread(current);

        corelock_unlock(&thread->waiter_cl);

        switch_thread();
        return;
    }

    corelock_unlock(&thread->waiter_cl);
}

/*---------------------------------------------------------------------------
 * Exit the current thread. The Right Way to Do Things (TM).
 *---------------------------------------------------------------------------
 */
/* This is done to foil optimizations that may require the current stack,
 * such as optimizing subexpressions that put variables on the stack that
 * get used after switching stacks. */
#if NUM_CORES > 1
/* Called by ASM stub */
static void thread_final_exit_do(struct thread_entry *current)
#else
/* No special procedure is required before calling */
static inline void thread_final_exit(struct thread_entry *current)
#endif
{
    /* At this point, this thread isn't using resources allocated for
     * execution except the slot itself. */

    /* Signal this thread */
    thread_queue_wake(&current->queue);
    corelock_unlock(&current->waiter_cl);
    switch_thread();
    /* This should never and must never be reached - if it is, the
     * state is corrupted */
    THREAD_PANICF("thread_exit->K:*R", current);
    while (1);
}

void thread_exit(void)
{
    register struct thread_entry * current = cores[CURRENT_CORE].running;

    /* Cancel CPU boost if any */
    cancel_cpu_boost();

    disable_irq();

    corelock_lock(&current->waiter_cl);
    LOCK_THREAD(current);

#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        corelock_unlock(&current->waiter_cl);
        thread_wait(id);
        THREAD_PANICF("thread_exit->WK:*R", current);
    }
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("thread_exit", current);
#endif

    if (current->tmo.prev != NULL)
    {
        /* Cancel pending timeout list removal */
        remove_from_list_tmo(current);
    }

    /* Switch tasks and never return */
    block_thread_on_l(current, STATE_KILLED);

    /* Slot must be unusable until thread is really gone */
    UNLOCK_THREAD_AT_TASK_SWITCH(current);

    /* Update ID for this slot */
    new_thread_id(current->id, current);
    current->name = NULL;

    /* Do final cleanup and remove the thread */
    thread_final_exit(current);
}

#ifdef ALLOW_REMOVE_THREAD
/*---------------------------------------------------------------------------
 * Remove a thread from the scheduler. Not The Right Way to Do Things in
 * an OS.
 *
 * Parameter is the ID as returned from create_thread().
 *
 * Use with care on threads that are not under careful control as this may
 * leave various objects in an undefined state.
 *---------------------------------------------------------------------------
 */
void remove_thread(unsigned int thread_id)
{
#ifdef HAVE_CORELOCK_OBJECT
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
    struct corelock *ocl = NULL;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    struct thread_entry *current = cores[core].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    unsigned state;
    int oldlevel;

    if (thread == current)
        thread_exit(); /* Current thread - do normal exit */

    oldlevel = disable_irq_save();

    corelock_lock(&thread->waiter_cl);
    LOCK_THREAD(thread);

    state = thread->state;

    if (thread->id != thread_id || state == STATE_KILLED)
        goto thread_killed;

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(thread);
        corelock_unlock(&thread->waiter_cl);
        restore_irq(oldlevel);
        thread_wait(thread_id);
        return;
    }

    thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("remove_thread", thread);
#endif

    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur which means other
         * threads have to be guided into becoming thread waiters if they
         * attempt to remove it. */
        unsigned int new_core = thread->core;

        corelock_unlock(&thread->waiter_cl);

        UNLOCK_THREAD(thread);
        restore_irq(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = disable_irq_save();

        corelock_lock(&thread->waiter_cl);
        LOCK_THREAD(thread);

        state = thread->state;
        core = new_core;
        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
         * run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Cancel CPU boost if any */
    boost_thread(thread, false);
#endif

IF_COP( retry_state: )

    switch (state)
    {
    case STATE_RUNNING:
        RTR_LOCK(core);
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        rtr_subtract_entry(core, thread->priority);
        RTR_UNLOCK(core);
        break;
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#ifdef HAVE_CORELOCK_OBJECT
        if (&thread->waiter_cl != thread->obj_cl)
        {
            ocl = thread->obj_cl;

            if (UNLIKELY(corelock_try_lock(ocl) == 0))
            {
                UNLOCK_THREAD(thread);
                corelock_lock(ocl);
                LOCK_THREAD(thread);

                if (UNLIKELY(thread->state != state))
                {
                    /* Something woke the thread */
                    state = thread->state;
                    corelock_unlock(ocl);
                    goto retry_state;
                }
            }
        }
#endif
        remove_from_list_l(thread->bqp, thread);

#ifdef HAVE_WAKEUP_EXT_CB
        if (thread->wakeup_ext_cb != NULL)
            thread->wakeup_ext_cb(thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
        if (thread->blocker != NULL)
        {
            /* Remove thread's priority influence from its chain */
            wakeup_priority_protocol_release(thread);
        }
#endif

#ifdef HAVE_CORELOCK_OBJECT
        if (ocl != NULL)
            corelock_unlock(ocl);
#endif
        break;
    /* Otherwise thread is frozen and hasn't run yet */
    }

    new_thread_id(thread_id, thread);
    thread->state = STATE_KILLED;

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake(&thread->queue);

    thread->name = NULL;

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock and reenable interrupts */
    corelock_unlock(&thread->waiter_cl);
    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

#if NUM_CORES > 1
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           the native core */
        switch_core(old_core);
    }
#endif
}
#endif /* ALLOW_REMOVE_THREAD */

#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative base priority for the core it runs on. Any
 * needed inheritance changes also may happen.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(unsigned int thread_id, int priority)
{
    int old_base_priority = -1;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* A little safety measure */
    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
        return -1;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* Make sure it's not killed */
    if (thread->id == thread_id && thread->state != STATE_KILLED)
    {
        int old_priority = thread->priority;

        old_base_priority = thread->base_priority;
        thread->base_priority = priority;

        prio_move_entry(&thread->pdist, old_base_priority, priority);
        priority = find_first_set_bit(thread->pdist.mask);

        if (old_priority == priority)
        {
            /* No priority change - do nothing */
        }
        else if (thread->state == STATE_RUNNING)
        {
            /* This thread is running - change location on the run
             * queue. No transitive inheritance needed. */
            set_running_thread_priority(thread, priority);
        }
        else
        {
            thread->priority = priority;

            if (thread->blocker != NULL)
            {
                /* Bubble new priority down the chain */
                struct blocker *bl = thread->blocker;        /* Blocker struct */
                struct thread_entry *bl_t = bl->thread;      /* Blocking thread */
                struct thread_entry * const tstart = thread; /* Initial thread */
                const int highest = MIN(priority, old_priority); /* Higher of new or old */

                for (;;)
                {
                    struct thread_entry *next; /* Next thread to check */
                    int bl_pr;    /* Highest blocked thread */
                    int queue_pr; /* New highest blocked thread */
#if NUM_CORES > 1
                    /* Owner can change but thread cannot be dislodged - thread
                     * may not be the first in the queue which allows other
                     * threads ahead in the list to be given ownership during the
                     * operation. If thread is next then the waker will have to
                     * wait for us and the owner of the object will remain fixed.
                     * If we successfully grab the owner -- which at some point
                     * is guaranteed -- then the queue remains fixed until we
                     * pass by. */
                    for (;;)
                    {
                        LOCK_THREAD(bl_t);

                        /* Double-check the owner - retry if it changed */
                        if (LIKELY(bl->thread == bl_t))
                            break;

                        UNLOCK_THREAD(bl_t);
                        bl_t = bl->thread;
                    }
#endif
                    bl_pr = bl->priority;

                    if (highest > bl_pr)
                        break; /* Object priority won't change */

                    /* This will include the thread being set */
                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);

                    if (queue_pr == bl_pr)
                        break; /* Object priority not changing */

                    /* Update thread boost for this object */
                    bl->priority = queue_pr;
                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
                    bl_pr = find_first_set_bit(bl_t->pdist.mask);

                    if (bl_t->priority == bl_pr)
                        break; /* Blocking thread priority not changing */

                    if (bl_t->state == STATE_RUNNING)
                    {
                        /* Thread not blocked - we're done */
                        set_running_thread_priority(bl_t, bl_pr);
                        break;
                    }

                    bl_t->priority = bl_pr;
                    bl = bl_t->blocker; /* Blocking thread has a blocker? */

                    if (bl == NULL)
                        break; /* End of chain */

                    next = bl->thread;

                    if (UNLIKELY(next == tstart))
                        break; /* Full-circle */

                    UNLOCK_THREAD(thread);

                    thread = bl_t;
                    bl_t = next;
                }

                UNLOCK_THREAD(bl_t);
            }
        }
    }

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return old_base_priority;
}

/*---------------------------------------------------------------------------
 * Returns the current base priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int base_priority = thread->base_priority;

    /* Simply check without locking slot. It may or may not be valid by the
     * time the function returns anyway. If all tests pass, it is the
     * correct value for when it was valid. */
    if (thread->id != thread_id || thread->state == STATE_KILLED)
        base_priority = -1;

    return base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */

#ifdef HAVE_IO_PRIORITY
int thread_get_io_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    return thread->io_priority;
}

void thread_set_io_priority(unsigned int thread_id, int io_priority)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    thread->io_priority = io_priority;
}
#endif
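
/* Illustrative sketch only - not part of the original source. Pairing the
 * two calls above to temporarily deprioritize a background thread's storage
 * accesses; buffering_id is an assumed thread id:
 *
 *     int old_io = thread_get_io_priority(buffering_id);
 *     thread_set_io_priority(buffering_id, IO_PRIORITY_BACKGROUND);
 *     ... bulk I/O of a foreground thread runs ahead ...
 *     thread_set_io_priority(buffering_id, old_io);
 */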
/*---------------------------------------------------------------------------
 * Starts a frozen thread - similar semantics to wakeup_thread except that
 * the thread is on no scheduler or wakeup queue at all. It exists simply by
 * virtue of the slot having a state of STATE_FROZEN.
 *---------------------------------------------------------------------------
 */
void thread_thaw(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* If thread is the current one, it cannot be frozen, therefore
     * there is no need to check that. */
    if (thread->id == thread_id && thread->state == STATE_FROZEN)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);
}
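
/* Illustrative sketch only - not part of the original source. The usual
 * pattern is to create a thread frozen, finish any shared setup, then thaw
 * it; worker_main, worker_stack and setup_shared_state() are assumptions:
 *
 *     unsigned int id = create_thread(worker_main, worker_stack,
 *                                     sizeof(worker_stack),
 *                                     CREATE_THREAD_FROZEN, "worker"
 *                                     IF_PRIO(, PRIORITY_BACKGROUND)
 *                                     IF_COP(, CPU));
 *     setup_shared_state();  // safe: the worker cannot run yet
 *     thread_thaw(id);       // STATE_FROZEN -> scheduled
 */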
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if (core == new_core)
    {
        /* No change - just return same core */
        return core;
    }

    int oldlevel = disable_irq_save();
    LOCK_THREAD(current);

    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - deactivate and let process complete */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        restore_irq(oldlevel);
        thread_wait(id);
        /* Should never be reached */
        THREAD_PANICF("switch_core->D:*R", current);
    }

    /* Get us off the running list for the current core */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, current);
    rtr_subtract_entry(core, current->priority);
    RTR_UNLOCK(core);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned-up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;

    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the list unlock to keep the other core stuck
     * until this thread is ready. */
    RTR_LOCK(new_core);

    rtr_add_entry(new_core, current->priority);
    add_to_list_l(&cores[new_core].running, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
    cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
    cores[core].block_task = current;

    UNLOCK_THREAD(current);

    /* Alert other core to activity */
    core_wake(new_core);

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

    /* Finally return the old core to caller */
    return current->retval;
}
#endif /* NUM_CORES > 1 */
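
/* Illustrative sketch only - not part of the original source. A thread may
 * migrate itself to the coprocessor and back; run_dsp_heavy_work() is an
 * assumed helper:
 *
 *     unsigned int old_core = switch_core(COP); // returns the previous core
 *     run_dsp_heavy_work();                     // now executing on COP
 *     switch_core(old_core);                    // migrate back
 */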
/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;

    if (core == CPU)
    {
        /* Initialize core locks and IDs in all slots */
        int n;
        for (n = 0; n < MAXTHREADS; n++)
        {
            thread = &threads[n];
            corelock_init(&thread->waiter_cl);
            corelock_init(&thread->slot_cl);
            thread->id = THREAD_ID_INIT(n);
        }
    }

    /* CPU will initialize first and then sleep */
    thread = find_empty_thread_slot();

    if (thread == NULL)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zero'ed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }

    /* Initialize initially non-zero members of core */
    cores[core].next_tmo_check = current_tick; /* Something not in the past */

    /* Initialize initially non-zero members of slot */
    UNLOCK_THREAD(thread); /* No sync worries yet */
    thread->name = main_thread_name;
    thread->state = STATE_RUNNING;
    IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
    corelock_init(&cores[core].rtr_cl);
    thread->base_priority = PRIORITY_USER_INTERFACE;
    prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
    thread->priority = PRIORITY_USER_INTERFACE;
    rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif

    add_to_list_l(&cores[core].running, thread);

    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        /* Wait for other processors to finish their inits since create_thread
         * isn't safe to call until the kernel inits are done. The first
         * threads created in the system must of course be created by CPU.
         * Another possible approach is to initialize all cores and slots
         * for each core by CPU, let the remainder proceed in parallel and
         * signal CPU when all are finished. */
        core_thread_init(CPU);
    }
    else
    {
        /* Initial stack is the idle stack */
        thread->stack = idle_stacks[core];
        thread->stack_size = IDLE_STACK_SIZE;
        /* After last processor completes, it should signal all others to
         * proceed or may signal the next and call thread_exit(). The last one
         * to finish will signal CPU. */
        core_thread_init(core);
        /* Other cores do not have a main thread - go idle inside switch_thread
         * until a thread can run on the core. */
        thread_exit();
#endif /* NUM_CORES */
    }
#ifdef INIT_MAIN_THREAD
    init_main_thread(&thread->context);
#endif
}
/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES == 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    unsigned int i;
    int usage = 0;

    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
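
/* Worked example of the scan above: stacks are pre-filled with the DEADBEEF
 * pattern when the thread is created, and since stacks grow downward the
 * first word (scanning up from index 0) that no longer matches marks the
 * deepest excursion. With stack_words == 256 and the first dirty word at
 * i == 192, usage = ((256 - 192) * 100) / 256 = 25 (percent). */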
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 * overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    if (LIKELY(thread->stack_size > 0))
        return stack_usage(thread->stack, thread->stack_size);
    return 0;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif /* NUM_CORES > 1 */
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";
        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)(uintptr_t)thread->id;
            fmt = "%04lX";
        }
        snprintf(buffer, size, fmt, name);
    }
}
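
/* Illustrative sketch only - not part of the original source. Typical use
 * from debug code; the 32-byte buffer size is an arbitrary assumption:
 *
 *     char namebuf[32];
 *     thread_get_name(namebuf, sizeof(namebuf), thread);
 *     DEBUGF("thread: %s\n", namebuf);
 */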