1 /* SPDX-License-Identifier: GPL-2.0-only */
8 #include <console/console.h>
12 static void idle_thread_init(void);
14 /* There needs to be at least one thread to run the ramstate state machine. */
15 #define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)
17 /* Storage space for the thread structs .*/
18 static struct thread all_threads
[TOTAL_NUM_THREADS
];
20 /* All runnable (but not running) and free threads are kept on their
21 * respective lists. */
22 static struct thread
*runnable_threads
;
23 static struct thread
*free_threads
;
25 static inline struct cpu_info
*thread_cpu_info(const struct thread
*t
)
27 return (void *)(t
->stack_orig
);
30 static inline int thread_can_yield(const struct thread
*t
)
32 return (t
!= NULL
&& t
->can_yield
);
35 /* Assumes current CPU info can switch. */
36 static inline struct thread
*cpu_info_to_thread(const struct cpu_info
*ci
)
/* NOTE(review): function body lost in this extraction (original lines
 * ~37-40); presumably returns the thread pointer tracked inside *ci --
 * confirm against upstream before building. */
/* Thread bound to the CPU we are currently executing on. */
static inline struct thread *current_thread(void)
{
	return cpu_info_to_thread(cpu_info());
}
/* Return non-zero when the given thread list has no entries. */
46 static inline int thread_list_empty(struct thread
**list
)
/* NOTE(review): body lost in this extraction (original lines ~47-49);
 * presumably tests *list against NULL -- confirm upstream. */
/* Detach and return the head element of *list. */
51 static inline struct thread
*pop_thread(struct thread
**list
)
/* NOTE(review): body lost in this extraction (original lines ~52-59); the
 * visible callers (pop_runnable, get_free_thread) treat this as removing
 * the list head -- confirm upstream. */
/* Insert t onto *list. */
61 static inline void push_thread(struct thread
**list
, struct thread
*t
)
/* NOTE(review): body lost in this extraction (original lines ~62-65) --
 * confirm upstream. */
67 static inline void push_runnable(struct thread
*t
)
69 push_thread(&runnable_threads
, t
);
72 static inline struct thread
*pop_runnable(void)
74 return pop_thread(&runnable_threads
);
/* Take a thread off the free list and ready it for use: the surviving
 * fragments reset the stack to its original base and re-derive the
 * cpu_info that lives there.
 * NOTE(review): several statements were lost in this extraction -- the
 * local 'struct thread *t;' declaration, the early return when the free
 * list is empty, the cpu_info initialization and the final return
 * (original lines ~78-99).  Restore from upstream before building. */
77 static inline struct thread
*get_free_thread(void)
81 struct cpu_info
*new_ci
;
83 if (thread_list_empty(&free_threads
))
86 t
= pop_thread(&free_threads
);
90 /* Initialize the cpu_info structure on the new stack. */
91 new_ci
= thread_cpu_info(t
);
95 /* Reset the current stack value to the original. */
96 t
->stack_current
= t
->stack_orig
;
101 static inline void free_thread(struct thread
*t
)
103 push_thread(&free_threads
, t
);
106 /* The idle thread is ran whenever there isn't anything else that is runnable.
107 * It's sole responsibility is to ensure progress is made by running the timer
109 static void idle_thread(void *unused
)
111 /* This thread never voluntarily yields. */
112 thread_prevent_coop();
/* NOTE(review): the tail of the header comment and the idle loop body
 * (original lines ~108, ~113-115) were lost in this extraction -- confirm
 * upstream. */
117 static void schedule(struct thread
*t
)
119 struct thread
*current
= current_thread();
121 /* If t is NULL need to find new runnable thread. */
123 if (thread_list_empty(&runnable_threads
))
124 die("Runnable thread list is empty!\n");
127 /* current is still runnable. */
128 push_runnable(current
);
130 switch_to_thread(t
->stack_current
, ¤t
->stack_current
);
/* Retire a finished thread; the call_wrapper trampolines below invoke this
 * after the thread's entry function returns. */
133 static void terminate_thread(struct thread
*t
)
/* NOTE(review): body lost in this extraction (original lines ~134-137);
 * presumably frees t and switches to the next runnable thread -- confirm
 * upstream. */
139 static void asmlinkage
call_wrapper(void *unused
)
141 struct thread
*current
= current_thread();
143 current
->entry(current
->entry_arg
);
144 terminate_thread(current
);
147 /* Block the current state transitions until thread is complete. */
148 static void asmlinkage
call_wrapper_block_current(void *unused
)
150 struct thread
*current
= current_thread();
152 boot_state_current_block();
153 current
->entry(current
->entry_arg
);
154 boot_state_current_unblock();
155 terminate_thread(current
);
158 struct block_boot_state
{
160 boot_state_sequence_t seq
;
163 /* Block the provided state until thread is complete. */
164 static void asmlinkage
call_wrapper_block_state(void *arg
)
166 struct block_boot_state
*bbs
= arg
;
167 struct thread
*current
= current_thread();
169 boot_state_block(bbs
->state
, bbs
->seq
);
170 current
->entry(current
->entry_arg
);
171 boot_state_unblock(bbs
->state
, bbs
->seq
);
172 terminate_thread(current
);
175 /* Prepare a thread so that it starts by executing thread_entry(thread_arg).
176 * Within thread_entry() it will call func(arg). */
177 static void prepare_thread(struct thread
*t
, void *func
, void *arg
,
178 asmlinkage
void (*thread_entry
)(void *),
/* NOTE(review): the final parameter (thread_arg), the opening brace, and
 * the statements that stash func/arg into t and enable can_yield were lost
 * in this extraction (original lines ~179-187) -- confirm upstream. */
181 /* Stash the function and argument to run. */
185 /* All new threads can yield by default. */
188 arch_prepare_thread(t
, thread_entry
, thread_arg
);
/* Timer callback installed by thread_yield_timed_callback(); presumably
 * wakes the thread stashed in tocb->priv -- TODO confirm upstream. */
191 static void thread_resume_from_timeout(struct timeout_callback
*tocb
)
/* NOTE(review): body lost in this extraction (original lines ~192-197). */
199 static void idle_thread_init(void)
/* NOTE(review): the opening brace, the local 'struct thread *t;'
 * declaration and the NULL check guarding the die() below were lost in
 * this extraction (original lines ~200-205) -- confirm upstream. */
203 t
= get_free_thread();
206 die("No threads available for idle thread!\n");
208 /* Queue idle thread to run once all other threads have yielded. */
209 prepare_thread(t
, idle_thread
, NULL
, call_wrapper
, NULL
);
211 /* Mark the currently executing thread to cooperate. */
215 /* Don't inline this function so the timeout_callback won't have its storage
216 * space on the stack cleaned up before the call to schedule(). */
217 static int __attribute__((noinline
))
218 thread_yield_timed_callback(struct timeout_callback
*tocb
,
219 unsigned int microsecs
)
/* Stash the current thread so the timer callback can resume it later. */
221 tocb
->priv
= current_thread();
222 tocb
->callback
= thread_resume_from_timeout
;
224 if (timer_sched_callback(tocb
, microsecs
))
/* NOTE(review): the error return, the switch-away to another thread and
 * the final return (original lines ~225-230) were lost in this extraction
 * -- confirm upstream. */
227 /* The timer callback will wake up the current thread. */
232 static void *thread_alloc_space(struct thread
*t
, size_t bytes
)
234 /* Allocate the amount of space on the stack keeping the stack
235 * aligned to the pointer size. */
236 t
->stack_current
-= ALIGN_UP(bytes
, sizeof(uintptr_t));
238 return (void *)t
->stack_current
;
241 void threads_initialize(void)
/* NOTE(review): the opening brace, local declarations (apparently i, t, ci
 * and stack_top from the fragments below) and most per-thread setup
 * statements were lost in this extraction; the surviving fragments only
 * sketch the flow: fetch the stack base, set up the BSP thread whose
 * cpu_info sits just under the stack top, then assign a stack to each
 * remaining thread.  Restore from upstream before building. */
249 thread_stacks
= arch_get_thread_stackbase();
251 /* Initialize the BSP thread first. The cpu_info structure is assumed
252 * to be just under the top of the stack. */
256 t
->stack_orig
= (uintptr_t)ci
;
259 stack_top
= &thread_stacks
[CONFIG_STACK_SIZE
] - sizeof(struct cpu_info
);
260 for (i
= 1; i
< TOTAL_NUM_THREADS
; i
++) {
262 t
->stack_orig
= (uintptr_t)stack_top
;
264 stack_top
+= CONFIG_STACK_SIZE
;
/* Spawn func(arg) on a fresh thread, blocking the current boot state
 * transition until it completes (via call_wrapper_block_current). */
271 int thread_run(void (*func
)(void *), void *arg
)
273 struct thread
*current
;
276 current
= current_thread();
278 if (!thread_can_yield(current
)) {
/* NOTE(review): the printk level/prefix for the message below, the error
 * returns, the 'struct thread *t;' declaration and the schedule()/return
 * tail were lost in this extraction -- confirm upstream. */
280 "thread_run() called from non-yielding context!\n");
284 t
= get_free_thread();
287 printk(BIOS_ERR
, "thread_run() No more threads!\n");
291 prepare_thread(t
, func
, arg
, call_wrapper_block_current
, NULL
);
/* Spawn func(arg) on a fresh thread and keep the given boot state/sequence
 * blocked until it completes (via call_wrapper_block_state). */
297 int thread_run_until(void (*func
)(void *), void *arg
,
298 boot_state_t state
, boot_state_sequence_t seq
)
300 struct thread
*current
;
302 struct block_boot_state
*bbs
;
304 current
= current_thread();
306 if (!thread_can_yield(current
)) {
/* NOTE(review): as in thread_run(), the error returns, the 't'
 * declaration, the bbs->state/bbs->seq assignments and the
 * schedule()/return tail were lost in this extraction -- confirm
 * upstream. */
308 "thread_run() called from non-yielding context!\n");
312 t
= get_free_thread();
315 printk(BIOS_ERR
, "thread_run() No more threads!\n");
/* bbs is carved out of the new thread's own stack so it stays alive until
 * the thread actually runs. */
319 bbs
= thread_alloc_space(t
, sizeof(*bbs
));
322 prepare_thread(t
, func
, arg
, call_wrapper_block_state
, bbs
);
/* Yield the current thread for at least microsecs, resumed by a timer
 * callback; tocb lives on this stack (see thread_yield_timed_callback's
 * noinline comment). */
328 int thread_yield_microseconds(unsigned int microsecs
)
330 struct thread
*current
;
331 struct timeout_callback tocb
;
333 current
= current_thread();
335 if (!thread_can_yield(current
))
/* NOTE(review): the error and success returns were lost in this
 * extraction (original lines ~336-342) -- confirm upstream. */
338 if (thread_yield_timed_callback(&tocb
, microsecs
))
344 void thread_cooperate(void)
346 struct thread
*current
;
348 current
= current_thread();
351 current
->can_yield
= 1;
354 void thread_prevent_coop(void)
356 struct thread
*current
;
358 current
= current_thread();
361 current
->can_yield
= 0;