/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/****************************************************************************
 * See notes below on implementing processor-specific portions!            *
 ***************************************************************************/
/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif
/****************************************************************************
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever waiting for an unlock
 * that will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Core Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 ****************************************************************************/
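
/* Illustrative sketch (not part of the build): the order above as it shows
 * up in a typical wakeup path. "q" stands in for any hypothetical kernel
 * object that pairs a corelock "cl" with a wait queue "queue".
 *
 *     int oldlevel = disable_irq_save();              // 1) IRQ
 *     corelock_lock(&q->cl);                          // 2) Kernel object
 *     unsigned int result = wakeup_thread(&q->queue); // takes 3) thread slot
 *                                                     //  and 4) rtr locks
 *     corelock_unlock(&q->cl);
 *     restore_irq(oldlevel);
 *     if (result & THREAD_SWITCH)
 *         switch_thread();
 */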
/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others;
 * usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */
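
/* Illustrative single-core sketch of the recipe above (not part of the
 * build; real implementations live in the processor-specific thread-*.c
 * files and "wait_for_interrupt" is a stand-in for the CPU's sleep
 * instruction, which on many cores wakes on a pending IRQ even while
 * interrupts are masked):
 *
 *     static inline void core_sleep(void)
 *     {
 *         wait_for_interrupt(); // step 3 - halt until a wake event pends
 *         enable_irq();         // step 4 - now let the wakeup source run
 *     }
 */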
/* Cast to the machine pointer size, whose size could be < 4 bytes or
 * > 32 bits */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
static struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];

static inline void core_sleep(IF_COP_VOID(unsigned int core))
        __attribute__((always_inline));

void check_tmo_threads(void)
        __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
        __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
        __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
        __attribute__((noinline));

#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
        __attribute__((always_inline));
#endif

static void thread_stkov(struct thread_entry *thread)
        __attribute__((noinline));

static inline void store_context(void* addr)
        __attribute__((always_inline));

static inline void load_context(const void* addr)
        __attribute__((always_inline));

void switch_thread(void)
        __attribute__((noinline));
/****************************************************************************
 * Processor-specific section - include necessary core support
 */

#if defined(CPU_ARM)
#include "thread-arm.c"
#if defined (CPU_PP)
#include "thread-pp.c"
#endif /* CPU_PP */
#elif defined(CPU_COLDFIRE)
#include "thread-coldfire.c"
#elif CONFIG_CPU == SH7034
#include "thread-sh.c"
#elif defined(CPU_MIPS) && CPU_MIPS == 32
#include "thread-mips32.c"
#else
/* Wouldn't compile anyway */
#error Processor not implemented.
#endif /* CONFIG_CPU == */

#ifndef IF_NO_SKIP_YIELD
#define IF_NO_SKIP_YIELD(...)
#endif

/*
 * End Processor-specific section
 ***************************************************************************/
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}

static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}

#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}

#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else /* NUM_CORES == 1 */
#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })
#endif /* NUM_CORES */
#if NUM_CORES > 1
#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })
#else
#define RTR_LOCK(core)
#define RTR_UNLOCK(core)
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))

#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))

#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else /* !HAVE_PRIORITY_SCHEDULING */
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *     +------------------------------+
 *     |                              |
 *     +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *     +->+---+->+---+->+---+->+---+--+
 *     |                              |
 *     +------------------------------+
 *---------------------------------------------------------------------------
 */
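
/* Sketch of a traversal over the circular "l" links (mirrors the loop in
 * find_highest_priority_in_list_l below):
 *
 *     struct thread_entry *curr = head;
 *     do
 *     {
 *         ... visit curr ...
 *         curr = curr->l.next;
 *     }
 *     while (curr != head);
 */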
/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}
/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == next)
    {
        /* The only item */
        *list = NULL;
        return;
    }

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    next->l.prev = prev;
    prev->l.next = next;
}
/*---------------------------------------------------------------------------
 * Timeout list structure - circular reverse (to make "remove item" O(1)),
 * NULL-terminated forward (to ease the far more common forward traversal):
 *     +------------------------------+
 *     |                              |
 *     +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *        +---+->+---+->+---+->+---+-X
 *---------------------------------------------------------------------------
 */
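
/* Sketch: the forward walk is NULL-terminated while the reverse links keep
 * "remove item" O(1); check_tmo_threads below effectively does
 *
 *     struct thread_entry *t;
 *     for (t = cores[CURRENT_CORE].timeout; t != NULL; t = t->tmo.next)
 *         ... compare t->tmo_tick against the current tick ...
 */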
/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
    THREAD_ASSERT(thread->tmo.prev == NULL,
                  "add_to_list_tmo->already listed", thread);

    thread->tmo.next = NULL;

    if (tmo == NULL)
    {
        /* Insert into unoccupied list */
        thread->tmo.prev = thread;
        cores[IF_COP_CORE(thread->core)].timeout = thread;
        return;
    }

    /* Insert last */
    thread->tmo.prev = tmo->tmo.prev;
    tmo->tmo.prev->tmo.next = thread;
    tmo->tmo.prev = thread;
}
/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is cancelled.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
    struct thread_entry *prev = thread->tmo.prev;
    struct thread_entry *next = thread->tmo.next;

    THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);

    if (next != NULL)
        next->tmo.prev = prev;

    if (thread == *list)
    {
        /* List becomes next item and empty if next == NULL */
        *list = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
    else
    {
        if (next == NULL)
            (*list)->tmo.prev = prev;
        prev->tmo.next = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
}
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Priority distribution structure (one category for each possible priority):
 *
 *       +----+----+----+ ... +-----+
 * hist: | F0 | F1 | F2 |     | F31 |
 *       +----+----+----+ ... +-----+
 * mask: | b0 | b1 | b2 |     | b31 |
 *       +----+----+----+ ... +-----+
 *
 * F = count of threads at priority category n (frequency)
 * b = bitmask of non-zero priority categories (occupancy)
 *
 *        / if H[n] != 0 : 1
 * b[n] = |
 *        \ else         : 0
 *
 *---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
 *
 * Mn = mutex n, Tn = thread n
 *
 * A lower priority thread inherits the priority of the highest priority
 * thread blocked waiting for it to complete an action (such as release a
 * mutex or respond to a message via queue_send):
 *
 * 1) T2->M1->T1
 *
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1 then T1 inherits the priority of T2.
 *
 * 2) T3
 *    \/
 *    T2->M1->T1
 *
 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
 * T1 inherits the higher of T2 and T3.
 *
 * 3) T3->M2->T2->M1->T1
 *
 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
 * then T1 inherits the priority of T3 through T2.
 *
 * Blocking chains can grow arbitrarily complex (though it's best that they
 * not form at all very often :) and build-up from these units.
 *---------------------------------------------------------------------------
 */
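
/* Worked example (illustrative): starting from an empty distribution,
 * prio_add_entry(&pd, 3) twice and prio_add_entry(&pd, 5) once leaves
 *
 *     pd.hist[3] == 2, pd.hist[5] == 1, all other counts 0
 *     pd.mask    == (1 << 3) | (1 << 5)
 *
 * find_first_set_bit(pd.mask) then yields 3 - the numerically lowest and
 * therefore highest-ranking occupied priority category. */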
/*---------------------------------------------------------------------------
 * Increment frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_add_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;
    /* Enough size/instruction count difference for ARM makes it worth it to
     * use different code (192 bytes for ARM). Only thing better is ASM. */
#ifdef CPU_ARM
    count = pd->hist[priority];
    if (++count == 1)
        pd->mask |= 1 << priority;
    pd->hist[priority] = count;
#else /* This one's better for Coldfire */
    if ((count = ++pd->hist[priority]) == 1)
        pd->mask |= 1 << priority;
#endif

    return count;
}
/*---------------------------------------------------------------------------
 * Decrement frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_subtract_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;

#ifdef CPU_ARM
    count = pd->hist[priority];
    if (--count == 0)
        pd->mask &= ~(1 << priority);
    pd->hist[priority] = count;
#else
    if ((count = --pd->hist[priority]) == 0)
        pd->mask &= ~(1 << priority);
#endif

    return count;
}
/*---------------------------------------------------------------------------
 * Remove from one category and add to another
 *---------------------------------------------------------------------------
 */
static inline void prio_move_entry(
    struct priority_distribution *pd, int from, int to)
{
    uint32_t mask = pd->mask;

#ifdef CPU_ARM
    unsigned int count;

    count = pd->hist[from];
    if (--count == 0)
        mask &= ~(1 << from);
    pd->hist[from] = count;

    count = pd->hist[to];
    if (++count == 1)
        mask |= 1 << to;
    pd->hist[to] = count;
#else
    if (--pd->hist[from] == 0)
        mask &= ~(1 << from);

    if (++pd->hist[to] == 1)
        mask |= 1 << to;
#endif

    pd->mask = mask;
}
/*---------------------------------------------------------------------------
 * Change the priority and rtr entry for a running thread
 *---------------------------------------------------------------------------
 */
static inline void set_running_thread_priority(
    struct thread_entry *thread, int priority)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    RTR_LOCK(core);
    rtr_move_entry(core, thread->priority, priority);
    thread->priority = priority;
    RTR_UNLOCK(core);
}
/*---------------------------------------------------------------------------
 * Finds the highest priority thread in a list of threads. If the list is
 * empty, PRIORITY_IDLE is returned.
 *
 * It is possible to use the struct priority_distribution within an object
 * instead of scanning the remaining threads in the list but as a compromise,
 * the resulting per-object memory overhead is saved at a slight speed
 * penalty under high contention.
 *---------------------------------------------------------------------------
 */
static int find_highest_priority_in_list_l(
    struct thread_entry * const thread)
{
    if (LIKELY(thread != NULL))
    {
        /* Go through the list until ending up at the initial thread */
        int highest_priority = thread->priority;
        struct thread_entry *curr = thread;

        do
        {
            int priority = curr->priority;

            if (priority < highest_priority)
                highest_priority = priority;

            curr = curr->l.next;
        }
        while (curr != thread);

        return highest_priority;
    }

    return PRIORITY_IDLE;
}
/*---------------------------------------------------------------------------
 * Register priority with blocking system and bubble it down the chain if
 * any until we reach the end or something is already equal or higher.
 *
 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
 * targets but that same action also guarantees a circular block anyway and
 * those are prevented, right? :-)
 *---------------------------------------------------------------------------
 */
static struct thread_entry *
    blocker_inherit_priority(struct thread_entry *current)
{
    const int priority = current->priority;
    struct blocker *bl = current->blocker;
    struct thread_entry * const tstart = current;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since the object protection is held */
    LOCK_THREAD(bl_t);

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority >= bl_pr)
            break; /* Object priority already high enough */

        bl->priority = priority;

        /* Add this one */
        prio_add_entry(&bl_t->pdist, priority);

        if (bl_pr < PRIORITY_IDLE)
        {
            /* Not first waiter - subtract old one */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
        }

        if (priority >= bl_t->priority)
            break; /* Thread priority high enough */

        if (bl_t->state == STATE_RUNNING)
        {
            /* Blocking thread is a running thread therefore there are no
             * further blockers. Change the "run queue" on which it
             * resides. */
            set_running_thread_priority(bl_t, priority);
            break;
        }

        bl_t->priority = priority;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(current);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        current = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    return current;
}
/*---------------------------------------------------------------------------
 * Readjust priorities when waking a thread blocked waiting for another,
 * in essence "releasing" the thread's effect on the object owner. Can be
 * performed from any context.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread)
{
    const int priority = thread->priority;
    struct blocker *bl = thread->blocker;
    struct thread_entry * const tstart = thread;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since object will be locked */
    LOCK_THREAD(bl_t);

    thread->blocker = NULL; /* Thread not blocked */

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority > bl_pr)
            break; /* Object priority higher */

        next = *thread->bqp;

        if (next == NULL)
        {
            /* No more threads in queue */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
            bl->priority = PRIORITY_IDLE;
        }
        else
        {
            /* Check list for highest remaining priority */
            int queue_pr = find_highest_priority_in_list_l(next);

            if (queue_pr == bl_pr)
                break; /* Object priority not changing */

            /* Change queue priority */
            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
            bl->priority = queue_pr;
        }

        if (bl_pr > bl_t->priority)
            break; /* thread priority is higher */

        bl_pr = find_first_set_bit(bl_t->pdist.mask);

        if (bl_pr == bl_t->priority)
            break; /* Thread priority not changing */

        if (bl_t->state == STATE_RUNNING)
        {
            /* No further blockers */
            set_running_thread_priority(bl_t, bl_pr);
            break;
        }

        bl_t->priority = bl_pr;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(thread);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        thread = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

#if NUM_CORES > 1
    if (UNLIKELY(thread != tstart))
    {
        /* Relock original if it changed */
        LOCK_THREAD(tstart);
    }
#endif

    return cores[CURRENT_CORE].running;
}
/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
 * blocking chains may only unblock from the very end.
 *
 * Only the owning thread itself may call this and so the assumption that
 * it is the running thread is made.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner */
    struct blocker *bl = thread->blocker;
    struct thread_entry *bl_t = bl->thread;
    struct thread_entry *next;
    int bl_pr;

    THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
                  "UPPT->wrong thread", cores[CURRENT_CORE].running);

    LOCK_THREAD(bl_t);

    bl_pr = bl->priority;

    /* Remove the object's boost from the owning thread */
    if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
        bl_pr <= bl_t->priority)
    {
        /* No more threads at this priority are waiting and the old level is
         * at least the thread level */
        int priority = find_first_set_bit(bl_t->pdist.mask);

        if (priority != bl_t->priority)
        {
            /* Adjust this thread's priority */
            set_running_thread_priority(bl_t, priority);
        }
    }

    next = *thread->bqp;

    if (LIKELY(next == NULL))
    {
        /* Expected shortcut - no more waiters */
        bl_pr = PRIORITY_IDLE;
    }
    else
    {
        if (thread->priority <= bl_pr)
        {
            /* Need to scan threads remaining in queue */
            bl_pr = find_highest_priority_in_list_l(next);
        }

        if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
            bl_pr < thread->priority)
        {
            /* Thread priority must be raised */
            thread->priority = bl_pr;
        }
    }

    bl->thread = thread;    /* This thread pwns */
    bl->priority = bl_pr;   /* Save highest blocked priority */
    thread->blocker = NULL; /* Thread not blocked */

    UNLOCK_THREAD(bl_t);

    return bl_t;
}
/*---------------------------------------------------------------------------
 * No threads must be blocked waiting for this thread except for it to exit.
 * The alternative is more elaborate cleanup and object registration code.
 * Check this for risk of silent data corruption when objects with
 * inheritable blocking are abandoned by the owner - not precise but may
 * catch something.
 *---------------------------------------------------------------------------
 */
static void check_for_obj_waiters(
    const char *function, struct thread_entry *thread)
{
    /* Only one bit in the mask should be set with a frequency of 1 which
     * represents the thread's own base priority */
    uint32_t mask = thread->pdist.mask;
    if ((mask & (mask - 1)) != 0 ||
        thread->pdist.hist[find_first_set_bit(mask)] > 1)
    {
        unsigned char name[32];
        thread_get_name(name, 32, thread);
        panicf("%s->%s with obj. waiters", function, name);
    }
}
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
 * Move a thread back to a running state on its core.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);

    RTR_LOCK(core);

    thread->state = STATE_RUNNING;

    add_to_list_l(&cores[core].running, thread);
    rtr_add_entry(core, thread->priority);

    RTR_UNLOCK(core);

#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}
/*---------------------------------------------------------------------------
 * Check the core's timeout list when at least one thread is due to wake.
 * Filtering for the condition is done before making the call. Resets the
 * tick when the next check will occur.
 *---------------------------------------------------------------------------
 */
void check_tmo_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *next = cores[core].timeout;

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */

    /* Break the loop once we have walked through the list of all
     * sleeping processes or have removed them all. */
    while (next != NULL)
    {
        /* Check sleeping threads. Allow interrupts between checks. */
        enable_irq();

        struct thread_entry *curr = next;

        next = curr->tmo.next;

        /* Lock thread slot against explicit wakeup */
        disable_irq();
        LOCK_THREAD(curr);

        unsigned state = curr->state;

        if (state < TIMEOUT_STATE_FIRST)
        {
            /* Cleanup threads no longer on a timeout but still on the
             * list. */
            remove_from_list_tmo(curr);
        }
        else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
        {
            /* Timeout still pending - this will be the usual case */
            if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
            {
                /* Earliest timeout found so far - move the next check up
                   to its time */
                next_tmo_check = curr->tmo_tick;
            }
        }
        else
        {
            /* Sleep timeout has been reached so bring the thread back to
             * life again. */
            if (state == STATE_BLOCKED_W_TMO)
            {
#if NUM_CORES > 1
                /* Lock the waiting thread's kernel object */
                struct corelock *ocl = curr->obj_cl;

                if (UNLIKELY(corelock_try_lock(ocl) == 0))
                {
                    /* Need to retry in the correct order though the need is
                     * unlikely */
                    UNLOCK_THREAD(curr);
                    corelock_lock(ocl);
                    LOCK_THREAD(curr);

                    if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
                    {
                        /* Thread was woken or removed explicitly while slot
                         * was unlocked */
                        corelock_unlock(ocl);
                        remove_from_list_tmo(curr);
                        UNLOCK_THREAD(curr);
                        continue;
                    }
                }
#endif /* NUM_CORES */

                remove_from_list_l(curr->bqp, curr);

#ifdef HAVE_WAKEUP_EXT_CB
                if (curr->wakeup_ext_cb != NULL)
                    curr->wakeup_ext_cb(curr);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
                if (curr->blocker != NULL)
                    wakeup_priority_protocol_release(curr);
#endif
                corelock_unlock(ocl);
            }
            /* else state == STATE_SLEEPING */

            remove_from_list_tmo(curr);

            RTR_LOCK(core);

            curr->state = STATE_RUNNING;

            add_to_list_l(&cores[core].running, curr);
            rtr_add_entry(core, curr->priority);

            RTR_UNLOCK(core);
        }

        UNLOCK_THREAD(curr);
    }

    cores[core].next_tmo_check = next_tmo_check;
}
/*---------------------------------------------------------------------------
 * Performs operations that must be done before blocking a thread but after
 * the state is saved.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
{
    struct thread_blk_ops *ops = &cores[core].blk_ops;
    const unsigned flags = ops->flags;

    if (LIKELY(flags == TBOP_CLEAR))
        return;

    switch (flags)
    {
    case TBOP_SWITCH_CORE:
        core_switch_blk_op(core, thread);
        /* Fall-through */
    case TBOP_UNLOCK_CORELOCK:
        corelock_unlock(ops->cl_p);
        break;
    }

    ops->flags = TBOP_CLEAR;
}
#endif /* NUM_CORES > 1 */
#ifdef RB_PROFILE
void profile_thread(void)
{
    profstart(cores[CURRENT_CORE].running - threads);
}
#endif /* RB_PROFILE */
/*---------------------------------------------------------------------------
 * Prepares a thread to block on an object's list and/or for a specified
 * duration - expects object and slot to be appropriately locked if needed
 * and interrupts to be masked.
 *---------------------------------------------------------------------------
 */
static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
{
    /* If inlined, unreachable branches will be pruned with no size penalty
       because state is passed as a constant parameter. */
    const unsigned int core = IF_COP_CORE(thread->core);

    /* Remove the thread from the list of running threads. */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, thread);
    rtr_subtract_entry(core, thread->priority);
    RTR_UNLOCK(core);

    /* Add a timeout to the block if not infinite */
    switch (state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Put the thread into a new list of inactive threads. */
        add_to_list_l(thread->bqp, thread);

        if (state == STATE_BLOCKED)
            break;

        /* Fall-through */
    case STATE_SLEEPING:
        /* If this thread times out sooner than any other thread, update
           next_tmo_check to its timeout */
        if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
        {
            cores[core].next_tmo_check = thread->tmo_tick;
        }

        if (thread->tmo.prev == NULL)
        {
            add_to_list_tmo(thread);
        }
        /* else thread was never removed from list - just keep it there */
        break;
    }

    /* Remember the next thread about to block. */
    cores[core].block_task = thread;

    /* Report new state. */
    thread->state = state;
}
/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion for any given priority. Any thread
 * that removed itself from the running list first must specify itself in
 * its core's block_task slot (as block_thread_on_l does).
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void switch_thread(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *block = cores[core].block_task;
    struct thread_entry *thread = cores[core].running;

    /* Get context to save - next thread to run is unknown until all wakeups
     * are evaluated */
    if (block != NULL)
    {
        cores[core].block_task = NULL;

#if NUM_CORES > 1
        if (UNLIKELY(thread == block))
        {
            /* This was the last thread running and another core woke us before
             * reaching here. Force next thread selection to give tmo threads or
             * other threads woken before this block a first chance. */
            block = NULL;
        }
        else
#endif
        {
            /* Blocking task is the old one */
            thread = block;
        }
    }

#ifdef RB_PROFILE
    profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
#endif

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    store_context(&thread->context);

    /* Check if the current thread stack is overflown */
    if (UNLIKELY(thread->stack[0] != DEADBEEF))
        thread_stkov(thread);

#if NUM_CORES > 1
    /* Run any blocking operations requested before switching/sleeping */
    run_blocking_ops(core, thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
        /* Reset the value of thread's skip count */
        thread->skip_count = 0;
#endif

    for (;;)
    {
        /* If there are threads on a timeout and the earliest wakeup is due,
         * check the list and wake any threads that need to start running
         * again. */
        if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
        {
            check_tmo_threads();
        }

        disable_irq();
        RTR_LOCK(core);

        thread = cores[core].running;

        if (UNLIKELY(thread == NULL))
        {
            /* Enter sleep mode to reduce power usage - woken up on interrupt
             * or wakeup request from another core - expected to enable
             * interrupts. */
            RTR_UNLOCK(core);
            core_sleep(IF_COP(core));
        }
        else
        {
#ifdef HAVE_PRIORITY_SCHEDULING
            /* Select the new task based on priorities and the last time a
             * process got CPU time relative to the highest priority runnable
             * task. */
            struct priority_distribution *pd = &cores[core].rtr;
            int max = find_first_set_bit(pd->mask);

            if (block == NULL)
            {
                /* Not switching on a block, tentatively select next thread */
                thread = thread->l.next;
            }

            for (;;)
            {
                int priority = thread->priority;
                int diff;

                /* This ridiculously simple method of aging seems to work
                 * suspiciously well. It does tend to reward CPU hogs (under
                 * yielding) but that's generally not desirable at all. On
                 * the plus side, it, relative to other threads, penalizes
                 * excess yielding which is good if some high priority thread
                 * is performing no useful work such as polling for a device
                 * to be ready. Of course, aging is only employed when higher
                 * and lower priority threads are runnable. The highest
                 * priority runnable thread(s) are never skipped unless a
                 * lower-priority process has aged sufficiently. Priorities
                 * of REALTIME class are run strictly according to priority
                 * thus are not subject to switchout due to lower-priority
                 * processes aging; they must give up the processor by going
                 * off the run list. */
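                /* Worked example (illustrative): with max == 1 and a thread
                 * at priority 4, diff == 3, so that thread is selected only
                 * once ++thread->skip_count exceeds diff*diff == 9, i.e.
                 * after it has been passed over nine times. */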
                if (LIKELY(priority <= max) ||
                    IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
                    (priority > PRIORITY_REALTIME &&
                     (diff = priority - max,
                      ++thread->skip_count > diff*diff)))
                {
                    cores[core].running = thread;
                    break;
                }

                thread = thread->l.next;
            }
#else
            /* Without priority use a simple FCFS algorithm */
            if (block == NULL)
            {
                /* Not switching on a block, select next thread */
                thread = thread->l.next;
                cores[core].running = thread;
            }
#endif /* HAVE_PRIORITY_SCHEDULING */

            RTR_UNLOCK(core);
            enable_irq();
            break;
        }
    }

    /* And finally give control to the next thread. */
    load_context(&thread->context);

#ifdef RB_PROFILE
    profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
#endif
}
/*---------------------------------------------------------------------------
 * Sleeps a thread for at least a specified number of ticks with zero being
 * a wait until the next tick.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void sleep_thread(int ticks)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    LOCK_THREAD(current);

    /* Set our timeout, remove from run list and join timeout list. */
    current->tmo_tick = current_tick + ticks + 1;
    block_thread_on_l(current, STATE_SLEEPING);

    UNLOCK_THREAD(current);
}
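
/* Usage sketch (illustrative): callers mask interrupts first and follow up
 * with switch_thread() so the new state takes effect - roughly what the
 * kernel's sleep() wrapper does:
 *
 *     disable_irq();
 *     sleep_thread(HZ/10); // at least 1/10 second at HZ ticks per second
 *     switch_thread();
 */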
/*---------------------------------------------------------------------------
 * Indefinitely block a thread on a blocking queue for explicit wakeup.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread(struct thread_entry *current)
{
    /* Set the state to blocked and take us off of the run queue until we
     * are explicitly woken */
    LOCK_THREAD(current);

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}
/*---------------------------------------------------------------------------
 * Block a thread on a blocking queue for a specified time interval or until
 * explicitly woken - whichever happens first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread_w_tmo(struct thread_entry *current, int timeout)
{
    /* Get the entry for the current running thread. */
    LOCK_THREAD(current);

    /* Set the state to blocked with the specified timeout */
    current->tmo_tick = current_tick + timeout;

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED_W_TMO);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}
/*---------------------------------------------------------------------------
 * Explicitly wakeup a thread on a blocking queue. Only affects threads of
 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
 *
 * This code should be considered a critical section by the caller meaning
 * that the object's corelock should be held.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread = *list;
    unsigned int result = THREAD_NONE;

    /* Check if there is a blocked thread at all. */
    if (thread == NULL)
        return result;

    LOCK_THREAD(thread);

    /* Determine thread's current state. */
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        remove_from_list_l(list, thread);

        result = THREAD_OK;

#ifdef HAVE_PRIORITY_SCHEDULING
        struct thread_entry *current;
        struct blocker *bl = thread->blocker;

        if (bl == NULL)
        {
            /* No inheritance - just boost the thread by aging */
            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
                thread->skip_count = thread->priority;
            current = cores[CURRENT_CORE].running;
        }
        else
        {
            /* Call the specified unblocking PIP */
            current = bl->wakeup_protocol(thread);
        }

        if (current != NULL &&
            find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
            < current->priority)
        {
            /* There is a thread ready to run of higher or same priority on
             * the same core as the current one; recommend a task switch.
             * Knowing if this is an interrupt call would be helpful here. */
            result |= THREAD_SWITCH;
        }
#endif /* HAVE_PRIORITY_SCHEDULING */

        core_schedule_wakeup(thread);
        break;

    /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
    default:
        THREAD_PANICF("wakeup_thread->block invalid", thread);
    case STATE_RUNNING:
    case STATE_KILLED:
        break;
#endif
    }

    UNLOCK_THREAD(thread);
    return result;
}
/*---------------------------------------------------------------------------
 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
 * from each operation or THREAD_NONE if nothing was awakened. Object owning
 * the queue must be locked first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break; /* No more threads */

        result |= rc;
    }

    return result;
}
/*---------------------------------------------------------------------------
 * Assign the thread slot a new ID. Version is 1-255.
 *---------------------------------------------------------------------------
 */
static void new_thread_id(unsigned int slot_num,
                          struct thread_entry *thread)
{
    unsigned int version =
        (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
        & THREAD_ID_VERSION_MASK;

    /* If wrapped to 0, make it 1 */
    if (version == 0)
        version = 1u << THREAD_ID_VERSION_SHIFT;

    thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
}
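
/* Worked example (illustrative, assuming an 8-bit version field starting at
 * THREAD_ID_VERSION_SHIFT above the slot bits, per the 1-255 range above):
 * a thread in slot 5 whose previous ID carried version 255 wraps to
 * version 1, giving
 *
 *     thread->id == (1u << THREAD_ID_VERSION_SHIFT) | 5
 *
 * so any stale ID naming the same slot under an older version no longer
 * compares equal to thread->id. */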
/*---------------------------------------------------------------------------
 * Find an empty thread slot or NULL if none found. The slot returned
 * will be locked on multicore.
 *---------------------------------------------------------------------------
 */
static struct thread_entry * find_empty_thread_slot(void)
{
    /* Any slot could be on an interrupt-accessible list */
    IF_COP( int oldlevel = disable_irq_save(); )
    struct thread_entry *thread = NULL;
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        /* Obtain current slot state - lock it on multicore */
        struct thread_entry *t = &threads[n];
        LOCK_THREAD(t);

        if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
        {
            /* Slot is empty - leave it locked and caller will unlock */
            thread = t;
            break;
        }

        /* Finished examining slot - no longer busy - unlock on multicore */
        UNLOCK_THREAD(t);
    }

    IF_COP( restore_irq(oldlevel); ) /* Reenable interrupts - this slot is
                                        not accessible to them yet */
    return thread;
}
/*---------------------------------------------------------------------------
 * Return the thread_entry pointer for a thread_id. Return the current
 * thread if the ID is 0 (alias for current).
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
    return (thread_id == THREAD_ID_CURRENT) ?
        cores[CURRENT_CORE].running :
        &threads[thread_id & THREAD_ID_SLOT_MASK];
}
/*---------------------------------------------------------------------------
 * Place the current core in idle mode - woken up on interrupt or wake
 * request from another core.
 *---------------------------------------------------------------------------
 */
void core_idle(void)
{
    IF_COP( const unsigned int core = CURRENT_CORE; )
    disable_irq();
    core_sleep(IF_COP(core));
}
/*---------------------------------------------------------------------------
 * Create a thread. If using a dual core architecture, specify which core to
 * start the thread on.
 *
 * Return ID if context area could be allocated, else 0.
 *---------------------------------------------------------------------------
 */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core))
{
    unsigned int i;
    unsigned int stack_words;
    uintptr_t stackptr, stackend;
    struct thread_entry *thread;
    unsigned state;
    int oldlevel;

    thread = find_empty_thread_slot();
    if (thread == NULL)
    {
        return 0;
    }

    oldlevel = disable_irq_save();

    /* Munge the stack to make it easy to spot stack overflows */
    stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
    stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
    stack_size = stackend - stackptr;
    stack_words = stack_size / sizeof (uintptr_t);

    for (i = 0; i < stack_words; i++)
    {
        ((uintptr_t *)stackptr)[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread->name = name;
    thread->stack = (uintptr_t *)stackptr;
    thread->stack_size = stack_size;
    thread->queue = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->cpu_boost = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    memset(&thread->pdist, 0, sizeof(thread->pdist));
    thread->blocker = NULL;
    thread->base_priority = priority;
    thread->priority = priority;
    thread->skip_count = priority;
    prio_add_entry(&thread->pdist, priority);
#endif

#ifdef HAVE_IO_PRIORITY
    /* Default to high (foreground) priority */
    thread->io_priority = IO_PRIORITY_IMMEDIATE;
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        commit_dcache();
    }
#endif

    /* Thread is not on any timeout list but be a bit paranoid */
    thread->tmo.prev = NULL;

    state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;

    thread->context.sp = (typeof (thread->context.sp))stackend;

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    thread->state = state;
    i = thread->id; /* Snapshot while locked */

    if (state == STATE_RUNNING)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

    return i;
}
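
/* Usage sketch (illustrative; the stack size and priority are arbitrary and
 * "demo"/"demo_stack" are hypothetical):
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *     static void demo(void);
 *
 *     unsigned int id = create_thread(demo, demo_stack, sizeof(demo_stack),
 *                                     0, "demo"
 *                                     IF_PRIO(, PRIORITY_BACKGROUND)
 *                                     IF_COP(, CPU));
 *     if (id == 0)
 *         ... no free thread slot ...
 */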
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/*---------------------------------------------------------------------------
 * Change the boost state of a thread boosting or unboosting the CPU
 * as required.
 *---------------------------------------------------------------------------
 */
static inline void boost_thread(struct thread_entry *thread, bool boost)
{
    if ((thread->cpu_boost != 0) != boost)
    {
        thread->cpu_boost = boost;
        cpu_boost(boost);
    }
}
void trigger_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, true);
}

void cancel_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, false);
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */
/*---------------------------------------------------------------------------
 * Block the current thread until another thread terminates. A thread may
 * wait on itself to terminate which prevents it from running again and it
 * will need to be killed externally.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void thread_wait(unsigned int thread_id)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* Lock thread-as-waitable-object lock */
    corelock_lock(&thread->waiter_cl);

    /* Be sure it hasn't been killed yet */
    if (thread_id == THREAD_ID_CURRENT ||
        (thread->id == thread_id && thread->state != STATE_KILLED))
    {
        IF_COP( current->obj_cl = &thread->waiter_cl; )
        current->bqp = &thread->queue;

        disable_irq();
        block_thread(current);

        corelock_unlock(&thread->waiter_cl);

        switch_thread();
        return;
    }

    corelock_unlock(&thread->waiter_cl);
}
/*---------------------------------------------------------------------------
 * Exit the current thread. The Right Way to Do Things (TM).
 *---------------------------------------------------------------------------
 */
void thread_exit(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    /* Cancel CPU boost if any */
    cancel_cpu_boost();

    disable_irq();

    corelock_lock(&current->waiter_cl);
    LOCK_THREAD(current);

#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        corelock_unlock(&current->waiter_cl);
        thread_wait(id);
        THREAD_PANICF("thread_exit->WK:*R", current);
    }
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("thread_exit", current);
#endif

    if (current->tmo.prev != NULL)
    {
        /* Cancel pending timeout list removal */
        remove_from_list_tmo(current);
    }

    /* Switch tasks and never return */
    block_thread_on_l(current, STATE_KILLED);

#if NUM_CORES > 1
    /* Switch to the idle stack if not on the main core (where "main"
     * runs) - we can hope gcc doesn't need the old stack beyond this
     * point. */
    if (core != CPU)
    {
        switch_to_idle_stack(core);
    }

    /* At this point, this thread isn't using resources allocated for
     * execution except the slot itself. */
#endif

    /* Update ID for this slot */
    new_thread_id(current->id, current);
    current->name = NULL;

    /* Signal this thread */
    thread_queue_wake(&current->queue);
    corelock_unlock(&current->waiter_cl);
    /* Slot must be unusable until thread is really gone */
    UNLOCK_THREAD_AT_TASK_SWITCH(current);

    switch_thread();

    /* This should never and must never be reached - if it is, the
     * state is corrupted */
    THREAD_PANICF("thread_exit->K:*R", current);
}
#ifdef ALLOW_REMOVE_THREAD
/*---------------------------------------------------------------------------
 * Remove a thread from the scheduler. Not The Right Way to Do Things in
 * normal code.
 *
 * Parameter is the ID as returned from create_thread().
 *
 * Use with care on threads that are not under careful control as this may
 * leave various objects in an undefined state.
 *---------------------------------------------------------------------------
 */
void remove_thread(unsigned int thread_id)
{
#if NUM_CORES > 1
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
    struct corelock *ocl = NULL;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    struct thread_entry *current = cores[core].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    unsigned state;
    int oldlevel;

    if (thread == current)
        thread_exit(); /* Current thread - do normal exit */

    oldlevel = disable_irq_save();

    corelock_lock(&thread->waiter_cl);
    LOCK_THREAD(thread);

    state = thread->state;

    if (thread->id != thread_id || state == STATE_KILLED)
        goto thread_killed;

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(thread);
        corelock_unlock(&thread->waiter_cl);
        restore_irq(oldlevel);
        thread_wait(thread_id);
        return;
    }

    thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("remove_thread", thread);
#endif

    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur which means other
         * threads have to be guided into becoming thread waiters if they
         * attempt to remove it. */
        unsigned int new_core = thread->core;

        corelock_unlock(&thread->waiter_cl);

        UNLOCK_THREAD(thread);
        restore_irq(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = disable_irq_save();

        corelock_lock(&thread->waiter_cl);
        LOCK_THREAD(thread);

        state = thread->state;
        core = new_core;
        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
         * run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Cancel CPU boost if any */
    boost_thread(thread, false);
#endif

IF_COP( retry_state: )

    switch (state)
    {
    case STATE_RUNNING:
        RTR_LOCK(core);
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        rtr_subtract_entry(core, thread->priority);
        RTR_UNLOCK(core);
        break;
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#if NUM_CORES > 1
        if (&thread->waiter_cl != thread->obj_cl)
        {
            ocl = thread->obj_cl;

            if (UNLIKELY(corelock_try_lock(ocl) == 0))
            {
                UNLOCK_THREAD(thread);
                corelock_lock(ocl);
                LOCK_THREAD(thread);

                if (UNLIKELY(thread->state != state))
                {
                    /* Something woke the thread */
                    state = thread->state;
                    corelock_unlock(ocl);
                    goto retry_state;
                }
            }
        }
#endif /* NUM_CORES */

        remove_from_list_l(thread->bqp, thread);

#ifdef HAVE_WAKEUP_EXT_CB
        if (thread->wakeup_ext_cb != NULL)
            thread->wakeup_ext_cb(thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
        if (thread->blocker != NULL)
        {
            /* Remove thread's priority influence from its chain */
            wakeup_priority_protocol_release(thread);
        }
#endif

#if NUM_CORES > 1
        if (ocl != NULL)
            corelock_unlock(ocl);
#endif
        break;
    /* Otherwise thread is frozen and hasn't run yet */
    }

    new_thread_id(thread_id, thread);
    thread->state = STATE_KILLED;

    /* If thread was waiting on itself, it will have been removed above.
     * The wrong order would result in waking the thread first and deadlocking
     * since the slot is already locked. */
    thread_queue_wake(&thread->queue);

    thread->name = NULL;

thread_killed: /* Thread was already killed */
    /* Removal complete - safe to unlock and reenable interrupts */
    corelock_unlock(&thread->waiter_cl);
    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

#if NUM_CORES > 1
    if (old_core < NUM_CORES)
    {
        /* Did a removal on another processor's thread - switch back to
           native core */
        switch_core(old_core);
    }
#endif
}
#endif /* ALLOW_REMOVE_THREAD */
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Sets the thread's relative base priority for the core it runs on. Any
 * needed inheritance changes also may happen.
 *---------------------------------------------------------------------------
 */
int thread_set_priority(unsigned int thread_id, int priority)
{
    int old_base_priority = -1;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* A little safety measure */
    if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
        return -1;

    /* Thread could be on any list and therefore on an interrupt accessible
       one - disable interrupts */
    int oldlevel = disable_irq_save();

    LOCK_THREAD(thread);

    /* Make sure it's not killed */
    if (thread_id == THREAD_ID_CURRENT ||
        (thread->id == thread_id && thread->state != STATE_KILLED))
    {
        int old_priority = thread->priority;

        old_base_priority = thread->base_priority;
        thread->base_priority = priority;

        prio_move_entry(&thread->pdist, old_base_priority, priority);
        priority = find_first_set_bit(thread->pdist.mask);

        if (old_priority == priority)
        {
            /* No priority change - do nothing */
        }
        else if (thread->state == STATE_RUNNING)
        {
            /* This thread is running - change location on the run
             * queue. No transitive inheritance needed. */
            set_running_thread_priority(thread, priority);
        }
        else
        {
            thread->priority = priority;

            if (thread->blocker != NULL)
            {
                /* Bubble new priority down the chain */
                struct blocker *bl = thread->blocker;   /* Blocker struct */
                struct thread_entry *bl_t = bl->thread; /* Blocking thread */
                struct thread_entry * const tstart = thread; /* Initial thread */
                const int highest = MIN(priority, old_priority); /* Higher of new or old */

                for (;;)
                {
                    struct thread_entry *next; /* Next thread to check */
                    int bl_pr;    /* Highest blocked thread */
                    int queue_pr; /* New highest blocked thread */
#if NUM_CORES > 1
                    /* Owner can change but thread cannot be dislodged - thread
                     * may not be the first in the queue which allows other
                     * threads ahead in the list to be given ownership during the
                     * operation. If thread is next then the waker will have to
                     * wait for us and the owner of the object will remain fixed.
                     * If we successfully grab the owner -- which at some point
                     * is guaranteed -- then the queue remains fixed until we
                     * pass by. */
                    for (;;)
                    {
                        LOCK_THREAD(bl_t);

                        /* Double-check the owner - retry if it changed */
                        if (LIKELY(bl->thread == bl_t))
                            break;

                        UNLOCK_THREAD(bl_t);
                        bl_t = bl->thread;
                    }
#endif
                    bl_pr = bl->priority;

                    if (highest > bl_pr)
                        break; /* Object priority won't change */

                    /* This will include the thread being set */
                    queue_pr = find_highest_priority_in_list_l(*thread->bqp);

                    if (queue_pr == bl_pr)
                        break; /* Object priority not changing */

                    /* Update thread boost for this object */
                    bl->priority = queue_pr;
                    prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
                    bl_pr = find_first_set_bit(bl_t->pdist.mask);

                    if (bl_t->priority == bl_pr)
                        break; /* Blocking thread priority not changing */

                    if (bl_t->state == STATE_RUNNING)
                    {
                        /* Thread not blocked - we're done */
                        set_running_thread_priority(bl_t, bl_pr);
                        break;
                    }

                    bl_t->priority = bl_pr;
                    bl = bl_t->blocker; /* Blocking thread has a blocker? */

                    if (bl == NULL)
                        break; /* End of chain */

                    next = bl->thread;

                    if (UNLIKELY(next == tstart))
                        break; /* Full-circle */

                    UNLOCK_THREAD(thread);

                    thread = bl_t;
                    bl_t = next;
                } /* for (;;) */

                UNLOCK_THREAD(bl_t);
            }
        }
    }

    UNLOCK_THREAD(thread);

    restore_irq(oldlevel);

    return old_base_priority;
}
/*---------------------------------------------------------------------------
 * Returns the current base priority for a thread.
 *---------------------------------------------------------------------------
 */
int thread_get_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int base_priority = thread->base_priority;

    /* Simply check without locking slot. It may or may not be valid by the
     * time the function returns anyway. If all tests pass, it is the
     * correct value for when it was valid. */
    if (thread_id != THREAD_ID_CURRENT &&
        (thread->id != thread_id || thread->state == STATE_KILLED))
        base_priority = -1;

    return base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
int thread_get_io_priority(unsigned int thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    return thread->io_priority;
}

void thread_set_io_priority(unsigned int thread_id, int io_priority)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    thread->io_priority = io_priority;
}
#endif
/*---------------------------------------------------------------------------
 * Starts a frozen thread - similar semantics to wakeup_thread except that
 * the thread is on no scheduler or wakeup queue at all. It exists simply by
 * virtue of the slot having a state of STATE_FROZEN.
 *---------------------------------------------------------------------------
 */
2092 void thread_thaw(unsigned int thread_id
)
2094 struct thread_entry
*thread
= thread_id_entry(thread_id
);
2095 int oldlevel
= disable_irq_save();
2097 LOCK_THREAD(thread
);
2099 /* If thread is the current one, it cannot be frozen, therefore
2100 * there is no need to check that. */
2101 if (thread
->id
== thread_id
&& thread
->state
== STATE_FROZEN
)
2102 core_schedule_wakeup(thread
);
2104 UNLOCK_THREAD(thread
);
2105 restore_irq(oldlevel
);
/*---------------------------------------------------------------------------
 * Return the ID of the currently executing thread.
 *---------------------------------------------------------------------------
 */
unsigned int thread_get_current(void)
{
    return cores[CURRENT_CORE].running->id;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switch the processor that the currently executing thread runs on.
 *---------------------------------------------------------------------------
 */
unsigned int switch_core(unsigned int new_core)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *current = cores[core].running;

    if (core == new_core)
    {
        /* No change - just return same core */
        return core;
    }

    int oldlevel = disable_irq_save();
    LOCK_THREAD(current);

    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - deactivate and let process complete */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        restore_irq(oldlevel);
        thread_wait(id);
        /* Should never be reached */
        THREAD_PANICF("switch_core->D:*R", current);
    }

    /* Get us off the running list for the current core */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, current);
    rtr_subtract_entry(core, current->priority);
    RTR_UNLOCK(core);

    /* Stash return value (old core) in a safe place */
    current->retval = core;

    /* If a timeout hadn't yet been cleaned-up it must be removed now or
     * the other core will likely attempt a removal from the wrong list! */
    if (current->tmo.prev != NULL)
    {
        remove_from_list_tmo(current);
    }

    /* Change the core number for this thread slot */
    current->core = new_core;

    /* Do not use core_schedule_wakeup here since this will result in
     * the thread starting to run on the other core before being finished on
     * this one. Delay the list unlock to keep the other core stuck
     * until this thread is ready. */
    RTR_LOCK(new_core);

    rtr_add_entry(new_core, current->priority);
    add_to_list_l(&cores[new_core].running, current);

    /* Make a callback into device-specific code, unlock the wakeup list so
     * that execution may resume on the new core, unlock our slot and finally
     * restore the interrupt level */
    cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
    cores[core].blk_ops.cl_p  = &cores[new_core].rtr_cl;
    cores[core].block_task    = current;

    UNLOCK_THREAD(current);

    /* Alert other core to activity */
    core_wake(new_core);

    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
    switch_thread_core(core, current);

    /* Finally return the old core to caller */
    return current->retval;
}
#endif /* NUM_CORES > 1 */
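/* Illustrative sketch (in #if 0 - not compiled): hop to the coprocessor for
 * a heavy task, then hop back. switch_core returns the previous core once
 * the thread is running on the target; do_heavy_work is hypothetical. */
#if 0
static void example_switch_core(void)
{
    unsigned int prev = switch_core(COP); /* COP = second core on PP targets */

    do_heavy_work();

    switch_core(prev); /* migrate back to the original core */
}
#endif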
/*---------------------------------------------------------------------------
 * Initialize threading API. This assumes interrupts are not yet enabled. On
 * multicore setups, no core is allowed to proceed until create_thread calls
 * are safe to perform.
 *---------------------------------------------------------------------------
 */
void init_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *thread;

    if (core == CPU)
    {
        /* Initialize core locks and IDs in all slots */
        int n;
        for (n = 0; n < MAXTHREADS; n++)
        {
            thread = &threads[n];
            corelock_init(&thread->waiter_cl);
            corelock_init(&thread->slot_cl);
            thread->id = THREAD_ID_INIT(n);
        }
    }

    /* CPU will initialize first and then sleep */
    thread = find_empty_thread_slot();

    if (thread == NULL)
    {
        /* WTF? There really must be a slot available at this stage.
         * This can fail if, for example, .bss isn't zero'ed out by the loader
         * or threads is in the wrong section. */
        THREAD_PANICF("init_threads->no slot", NULL);
    }

    /* Initialize initially non-zero members of core */
    cores[core].next_tmo_check = current_tick; /* Something not in the past */

    /* Initialize initially non-zero members of slot */
    UNLOCK_THREAD(thread); /* No sync worries yet */
    thread->name = main_thread_name;
    thread->state = STATE_RUNNING;
    IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
    corelock_init(&cores[core].rtr_cl);
    thread->base_priority = PRIORITY_USER_INTERFACE;
    prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
    thread->priority = PRIORITY_USER_INTERFACE;
    rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif

    add_to_list_l(&cores[core].running, thread);

    if (core == CPU)
    {
        thread->stack = stackbegin;
        thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        /* Wait for other processors to finish their inits since create_thread
         * isn't safe to call until the kernel inits are done. The first
         * threads created in the system must of course be created by CPU.
         * Another possible approach is to initialize all cores and slots
         * for each core by CPU, let the remainder proceed in parallel and
         * signal CPU when all are finished. */
        core_thread_init(CPU);
    }
    else
    {
        /* Initial stack is the idle stack */
        thread->stack = idle_stacks[core];
        thread->stack_size = IDLE_STACK_SIZE;
        /* After last processor completes, it should signal all others to
         * proceed or may signal the next and call thread_exit(). The last one
         * to finish will signal CPU. */
        core_thread_init(core);
        /* Other cores do not have a main thread - go idle inside switch_thread
         * until a thread can run on the core. */
        thread_exit();
#endif /* NUM_CORES */
    }
}
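/* Sketch of the expected boot ordering (in #if 0 - not compiled): each core
 * calls init_threads with interrupts still disabled, and only after the
 * core_thread_init handshake may threads be created. The surrounding
 * function shape and app_main/app_stack names are assumptions about target
 * startup code, not this file's API. */
#if 0
static void example_boot(void)
{
    /* interrupts are still disabled on entry */
    init_threads(); /* other cores rendezvous inside core_thread_init */

    /* ... remaining kernel inits ... */

    /* only now is create_thread safe to call */
    create_thread(app_main, app_stack, sizeof(app_stack), 0, "app"
                  IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));
}
#endif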
/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES == 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
    unsigned int usage = 0;
    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    unsigned int i;

    for (i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = ((stack_words - i) * 100) / stack_words;
            break;
        }
    }

    return usage;
}
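/* Worked example of the scan above: stacks are painted with DEADBEEF when a
 * thread is created, so the first word that no longer holds DEADBEEF marks
 * the deepest excursion. For a 256-word stack whose first overwritten word
 * is found at i == 160: usage = ((256 - 160) * 100) / 256 = 37 (percent). */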
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of stack a thread ever used while running.
 * NOTE: Some large buffer allocations that don't use enough of the buffer to
 * overwrite stackptr[0] will not be seen.
 *---------------------------------------------------------------------------
 */
int thread_stack_usage(const struct thread_entry *thread)
{
    return stack_usage(thread->stack, thread->stack_size);
}
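/* Hedged sketch (in #if 0 - not compiled): a debug dump of every occupied
 * slot's stack usage. The name != NULL liveness test is a simplification
 * assumed for illustration; real debug code should inspect the slot state. */
#if 0
static void example_dump_stacks(void)
{
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        struct thread_entry *t = &threads[n];

        if (t->name != NULL)
            DEBUGF("%02d: %d%%\n", n, thread_stack_usage(t));
    }
}
#endif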
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the core's idle stack ever used during
 * runtime.
 *---------------------------------------------------------------------------
 */
int idle_stack_usage(unsigned int core)
{
    return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif /* NUM_CORES > 1 */
/*---------------------------------------------------------------------------
 * Fills in the buffer with the specified thread's name. If the name is NULL,
 * empty, or the thread is in destruct state a formatted ID is written
 * instead.
 *---------------------------------------------------------------------------
 */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread)
{
    if (size <= 0)
        return;

    *buffer = '\0';

    if (thread)
    {
        /* Display thread name if one or ID if none */
        const char *name = thread->name;
        const char *fmt = "%s";

        if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
        {
            name = (const char *)thread;
            fmt = "%08lX"; /* Fall back to the formatted slot address */
        }

        snprintf(buffer, size, fmt, name);
    }
}
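/* Minimal usage sketch (in #if 0 - not compiled): format the current
 * thread's name into a small buffer; threads without a name come out as
 * the formatted slot address. */
#if 0
static void example_print_name(void)
{
    char buf[32];

    thread_get_name(buf, sizeof(buf), thread_id_entry(THREAD_ID_CURRENT));
    DEBUGF("current thread: %s\n", buf);
}
#endif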