/* SPDX-License-Identifier: GPL-2.0-only */

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <arch/cpu.h>
#include <bootstate.h>
#include <console/console.h>
#include <thread.h>
#include <timer.h>

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage boot state machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;
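
/* The cpu_info structure for a thread lives at the top of that thread's
 * stack; stack_orig points at it, so recovering it is just a cast. */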
static inline struct cpu_info *thread_cpu_info(const struct thread *t)
{
        return (void *)(t->stack_orig);
}

static inline int thread_can_yield(const struct thread *t)
{
        return (t != NULL && t->can_yield);
}

/* Assumes current CPU info can switch. */
static inline struct thread *cpu_info_to_thread(const struct cpu_info *ci)
{
        return ci->thread;
}

static inline struct thread *current_thread(void)
{
        return cpu_info_to_thread(cpu_info());
}

static inline int thread_list_empty(struct thread **list)
{
        return *list == NULL;
}

static inline struct thread *pop_thread(struct thread **list)
{
        struct thread *t;

        t = *list;
        *list = t->next;
        t->next = NULL;
        return t;
}

static inline void push_thread(struct thread **list, struct thread *t)
{
        t->next = *list;
        *list = t;
}

static inline void push_runnable(struct thread *t)
{
        push_thread(&runnable_threads, t);
}

static inline struct thread *pop_runnable(void)
{
        return pop_thread(&runnable_threads);
}
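
/* Take a thread off the free list and copy the running CPU's cpu_info to
 * the top of its stack so cpu_info() still works after the stack switch.
 * Returns NULL if no thread is free. */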
static inline struct thread *get_free_thread(void)
{
        struct thread *t;
        struct cpu_info *ci;
        struct cpu_info *new_ci;

        if (thread_list_empty(&free_threads))
                return NULL;

        t = pop_thread(&free_threads);

        ci = cpu_info();

        /* Initialize the cpu_info structure on the new stack. */
        new_ci = thread_cpu_info(t);
        *new_ci = *ci;
        new_ci->thread = t;

        /* Reset the current stack value to the original. */
        t->stack_current = t->stack_orig;

        return t;
}

static inline void free_thread(struct thread *t)
{
        push_thread(&free_threads, t);
}

/* The idle thread is run whenever there isn't anything else that is
 * runnable. Its sole responsibility is to ensure progress is made by
 * running the timer callbacks. */
static void idle_thread(void *unused)
{
        /* This thread never voluntarily yields. */
        thread_prevent_coop();
        while (1)
                timers_run();
}
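
/* Switch away from the current thread. A NULL argument means the caller
 * is blocking or terminating, so the next runnable thread is picked
 * instead; a non-NULL argument keeps the current thread runnable and
 * resumes execution on t's stack. */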
static void schedule(struct thread *t)
{
        struct thread *current = current_thread();

        /* If t is NULL need to find new runnable thread. */
        if (t == NULL) {
                if (thread_list_empty(&runnable_threads))
                        die("Runnable thread list is empty!\n");
                t = pop_runnable();
        } else {
                /* current is still runnable. */
                push_runnable(current);
        }
        switch_to_thread(t->stack_current, &current->stack_current);
}

static void terminate_thread(struct thread *t)
{
        free_thread(t);
        schedule(NULL);
}
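
/* Entry shim for new threads: run the stashed entry function, then
 * terminate, returning the thread to the free list. */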
static void asmlinkage call_wrapper(void *unused)
{
        struct thread *current = current_thread();

        current->entry(current->entry_arg);
        terminate_thread(current);
}

/* Block the current boot state transition until the thread is complete. */
static void asmlinkage call_wrapper_block_current(void *unused)
{
        struct thread *current = current_thread();

        boot_state_current_block();
        current->entry(current->entry_arg);
        boot_state_current_unblock();
        terminate_thread(current);
}

struct block_boot_state {
        boot_state_t state;
        boot_state_sequence_t seq;
};

/* Block the provided state until thread is complete. */
static void asmlinkage call_wrapper_block_state(void *arg)
{
        struct block_boot_state *bbs = arg;
        struct thread *current = current_thread();

        boot_state_block(bbs->state, bbs->seq);
        current->entry(current->entry_arg);
        boot_state_unblock(bbs->state, bbs->seq);
        terminate_thread(current);
}

/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
 * Within thread_entry() it will call func(arg). */
static void prepare_thread(struct thread *t, void *func, void *arg,
                           asmlinkage void (*thread_entry)(void *),
                           void *thread_arg)
{
        /* Stash the function and argument to run. */
        t->entry = func;
        t->entry_arg = arg;

        /* All new threads can yield by default. */
        t->can_yield = 1;

        arch_prepare_thread(t, thread_entry, thread_arg);
}
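
/* Timer callback that hands the CPU back to the thread stashed in
 * tocb->priv; schedule() pushes whichever thread was running the timers
 * (typically the idle thread) back onto the runnable list. */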
static void thread_resume_from_timeout(struct timeout_callback *tocb)
{
        struct thread *to;

        to = tocb->priv;
        schedule(to);
}

static void idle_thread_init(void)
{
        struct thread *t;

        t = get_free_thread();

        if (t == NULL)
                die("No threads available for idle thread!\n");

        /* Queue idle thread to run once all other threads have yielded. */
        prepare_thread(t, idle_thread, NULL, call_wrapper, NULL);
        push_runnable(t);
        /* Mark the currently executing thread to cooperate. */
        thread_cooperate();
}

/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb,
                            unsigned int microsecs)
{
        tocb->priv = current_thread();
        tocb->callback = thread_resume_from_timeout;

        if (timer_sched_callback(tocb, microsecs))
                return -1;

        /* The timer callback will wake up the current thread. */
        schedule(NULL);
        return 0;
}

static void *thread_alloc_space(struct thread *t, size_t bytes)
{
        /* Allocate the amount of space on the stack keeping the stack
         * aligned to the pointer size. */
        t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));

        return (void *)t->stack_current;
}
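
/* Set up the thread structures: thread 0 adopts the stack the BSP is
 * already running on (with its cpu_info just below the stack top), and
 * each remaining thread gets a CONFIG_STACK_SIZE slice of the
 * arch-provided stack area and goes onto the free list. */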
void threads_initialize(void)
{
        int i;
        struct thread *t;
        u8 *stack_top;
        struct cpu_info *ci;
        u8 *thread_stacks;

        thread_stacks = arch_get_thread_stackbase();

        /* Initialize the BSP thread first. The cpu_info structure is assumed
         * to be just under the top of the stack. */
        t = &all_threads[0];
        ci = cpu_info();
        ci->thread = t;
        t->stack_orig = (uintptr_t)ci;
        t->id = 0;

        stack_top = &thread_stacks[CONFIG_STACK_SIZE] - sizeof(struct cpu_info);
        for (i = 1; i < TOTAL_NUM_THREADS; i++) {
                t = &all_threads[i];
                t->stack_orig = (uintptr_t)stack_top;
                t->id = i;
                stack_top += CONFIG_STACK_SIZE;
                free_thread(t);
        }

        idle_thread_init();
}
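
/* Run func(arg) on a fresh thread, blocking the current boot state
 * transition until it completes. Returns 0 on success and -1 if the
 * caller cannot yield or no thread is free. A minimal usage sketch,
 * where my_worker is a hypothetical callback:
 *
 *	static void my_worker(void *arg) { ... }
 *
 *	if (thread_run(my_worker, NULL) < 0)
 *		my_worker(NULL);	// fall back to running synchronously
 */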
int thread_run(void (*func)(void *), void *arg)
{
        struct thread *current;
        struct thread *t;

        current = current_thread();

        if (!thread_can_yield(current)) {
                printk(BIOS_ERR,
                       "thread_run() called from non-yielding context!\n");
                return -1;
        }

        t = get_free_thread();

        if (t == NULL) {
                printk(BIOS_ERR, "thread_run(): no more threads!\n");
                return -1;
        }

        prepare_thread(t, func, arg, call_wrapper_block_current, NULL);
        schedule(t);

        return 0;
}
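
/* Like thread_run(), but block only the given boot state/sequence point
 * rather than the current transition; the block_boot_state bookkeeping is
 * carved out of the new thread's own stack via thread_alloc_space().
 * A hypothetical call, using boot state identifiers from <bootstate.h>:
 *
 *	thread_run_until(my_worker, NULL, BS_DEV_ENUMERATE, BS_ON_ENTRY);
 */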
int thread_run_until(void (*func)(void *), void *arg,
                     boot_state_t state, boot_state_sequence_t seq)
{
        struct thread *current;
        struct thread *t;
        struct block_boot_state *bbs;

        current = current_thread();

        if (!thread_can_yield(current)) {
                printk(BIOS_ERR,
                       "thread_run_until() called from non-yielding context!\n");
                return -1;
        }

        t = get_free_thread();

        if (t == NULL) {
                printk(BIOS_ERR, "thread_run_until(): no more threads!\n");
                return -1;
        }

        bbs = thread_alloc_space(t, sizeof(*bbs));
        bbs->state = state;
        bbs->seq = seq;
        prepare_thread(t, func, arg, call_wrapper_block_state, bbs);
        schedule(t);

        return 0;
}
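
/* Yield the CPU for at least microsecs by arming a timer callback that
 * makes this thread runnable again. Returns -1 without yielding when
 * called from a non-yielding context or when no timer slot is available,
 * so callers should be prepared to busy-wait instead, e.g. (sketch):
 *
 *	if (thread_yield_microseconds(100) < 0)
 *		udelay(100);
 */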
int thread_yield_microseconds(unsigned int microsecs)
{
        struct thread *current;
        struct timeout_callback tocb;

        current = current_thread();

        if (!thread_can_yield(current))
                return -1;

        if (thread_yield_timed_callback(&tocb, microsecs))
                return -1;

        return 0;
}
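
/* Mark the current thread as willing (thread_cooperate) or unwilling
 * (thread_prevent_coop) to yield; both are no-ops when no thread is
 * attached to the current CPU. */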
void thread_cooperate(void)
{
        struct thread *current;

        current = current_thread();

        if (current != NULL)
                current->can_yield = 1;
}

void thread_prevent_coop(void)
{
        struct thread *current;

        current = current_thread();

        if (current != NULL)
                current->can_yield = 0;
}