/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
19 #include <bootstate.h>
20 #include <console/console.h>
23 static void idle_thread_init(void);
25 /* There needs to be at least one thread to run the ramstate state machine. */
26 #define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)
28 /* Storage space for the thread structs .*/
29 static struct thread all_threads
[TOTAL_NUM_THREADS
];
/* All runnable (but not running) and free threads are kept on their
 * respective singly-linked lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;
36 static inline struct cpu_info
*thread_cpu_info(const struct thread
*t
)
38 return (void *)(t
->stack_orig
);
41 static inline int thread_can_yield(const struct thread
*t
)
43 return (t
!= NULL
&& t
->can_yield
);
46 /* Assumes current CPU info can switch. */
47 static inline struct thread
*cpu_info_to_thread(const struct cpu_info
*ci
)
/* Thread executing on the current CPU, derived from its cpu_info. */
static inline struct thread *current_thread(void)
{
	return cpu_info_to_thread(cpu_info());
}
/* A list is empty when its head pointer is NULL. */
static inline int thread_list_empty(struct thread **list)
{
	return *list == NULL;
}
62 static inline struct thread
*pop_thread(struct thread
**list
)
72 static inline void push_thread(struct thread
**list
, struct thread
*t
)
78 static inline void push_runnable(struct thread
*t
)
80 push_thread(&runnable_threads
, t
);
83 static inline struct thread
*pop_runnable(void)
85 return pop_thread(&runnable_threads
);
88 static inline struct thread
*get_free_thread(void)
92 struct cpu_info
*new_ci
;
94 if (thread_list_empty(&free_threads
))
97 t
= pop_thread(&free_threads
);
101 /* Initialize the cpu_info structure on the new stack. */
102 new_ci
= thread_cpu_info(t
);
106 /* Reset the current stack value to the original. */
107 t
->stack_current
= t
->stack_orig
;
112 static inline void free_thread(struct thread
*t
)
114 push_thread(&free_threads
, t
);
/* The idle thread is run whenever there isn't anything else that is runnable.
 * Its sole responsibility is to ensure progress is made by running the timer
 * callbacks. */
/* NOTE(review): loop body was lost in extraction; reconstructed as the usual
 * run-timers-forever loop — confirm against upstream. */
static void idle_thread(void *unused)
{
	/* This thread never voluntarily yields. */
	thread_prevent_coop();
	while (1)
		timers_run();
}
129 static void schedule(struct thread
*t
)
131 struct thread
*current
= current_thread();
133 /* If t is NULL need to find new runnable thread. */
135 if (thread_list_empty(&runnable_threads
))
136 die("Runnable thread list is empty!\n");
139 /* current is still runnable. */
140 push_runnable(current
);
142 switch_to_thread(t
->stack_current
, ¤t
->stack_current
);
/* Recycle a finished thread and hand the CPU to the next runnable one.
 * Never returns to the caller's context. */
static void terminate_thread(struct thread *t)
{
	free_thread(t);
	schedule(NULL);
}
151 static void asmlinkage
call_wrapper(void *unused
)
153 struct thread
*current
= current_thread();
155 current
->entry(current
->entry_arg
);
156 terminate_thread(current
);
159 /* Block the current state transitions until thread is complete. */
160 static void asmlinkage
call_wrapper_block_current(void *unused
)
162 struct thread
*current
= current_thread();
164 boot_state_current_block();
165 current
->entry(current
->entry_arg
);
166 boot_state_current_unblock();
167 terminate_thread(current
);
170 struct block_boot_state
{
172 boot_state_sequence_t seq
;
175 /* Block the provided state until thread is complete. */
176 static void asmlinkage
call_wrapper_block_state(void *arg
)
178 struct block_boot_state
*bbs
= arg
;
179 struct thread
*current
= current_thread();
181 boot_state_block(bbs
->state
, bbs
->seq
);
182 current
->entry(current
->entry_arg
);
183 boot_state_unblock(bbs
->state
, bbs
->seq
);
184 terminate_thread(current
);
187 /* Prepare a thread so that it starts by executing thread_entry(thread_arg).
188 * Within thread_entry() it will call func(arg). */
189 static void prepare_thread(struct thread
*t
, void *func
, void *arg
,
190 void asmlinkage (*thread_entry
)(void *),
193 /* Stash the function and argument to run. */
197 /* All new threads can yield by default. */
200 arch_prepare_thread(t
, thread_entry
, thread_arg
);
203 static void thread_resume_from_timeout(struct timeout_callback
*tocb
)
211 static void idle_thread_init(void)
215 t
= get_free_thread();
218 die("No threads available for idle thread!\n");
221 /* Queue idle thread to run once all other threads have yielded. */
222 prepare_thread(t
, idle_thread
, NULL
, call_wrapper
, NULL
);
224 /* Mark the currently executing thread to cooperate. */
228 /* Don't inline this function so the timeout_callback won't have its storage
229 * space on the stack cleaned up before the call to schedule(). */
230 static int __attribute__((noinline
))
231 thread_yield_timed_callback(struct timeout_callback
*tocb
, unsigned microsecs
)
233 tocb
->priv
= current_thread();
234 tocb
->callback
= thread_resume_from_timeout
;
236 if (timer_sched_callback(tocb
, microsecs
))
239 /* The timer callback will wake up the current thread. */
244 static void *thread_alloc_space(struct thread
*t
, size_t bytes
)
246 /* Allocate the amount of space on the stack keeping the stack
247 * aligned to the pointer size. */
248 t
->stack_current
-= ALIGN_UP(bytes
, sizeof(uintptr_t));
250 return (void *)t
->stack_current
;
253 void threads_initialize(void)
261 thread_stacks
= arch_get_thread_stackbase();
263 /* Initialize the BSP thread first. The cpu_info structure is assumed
264 * to be just under the top of the stack. */
268 t
->stack_orig
= (uintptr_t)ci
;
271 stack_top
= &thread_stacks
[CONFIG_STACK_SIZE
] - sizeof(struct cpu_info
);
272 for (i
= 1; i
< TOTAL_NUM_THREADS
; i
++) {
274 t
->stack_orig
= (uintptr_t)stack_top
;
276 stack_top
+= CONFIG_STACK_SIZE
;
283 int thread_run(void (*func
)(void *), void *arg
)
285 struct thread
*current
;
288 current
= current_thread();
290 if (!thread_can_yield(current
)) {
292 "thread_run() called from non-yielding context!\n");
296 t
= get_free_thread();
299 printk(BIOS_ERR
, "thread_run() No more threads!\n");
303 prepare_thread(t
, func
, arg
, call_wrapper_block_current
, NULL
);
309 int thread_run_until(void (*func
)(void *), void *arg
,
310 boot_state_t state
, boot_state_sequence_t seq
)
312 struct thread
*current
;
314 struct block_boot_state
*bbs
;
316 current
= current_thread();
318 if (!thread_can_yield(current
)) {
320 "thread_run() called from non-yielding context!\n");
324 t
= get_free_thread();
327 printk(BIOS_ERR
, "thread_run() No more threads!\n");
331 bbs
= thread_alloc_space(t
, sizeof(*bbs
));
334 prepare_thread(t
, func
, arg
, call_wrapper_block_state
, bbs
);
340 int thread_yield_microseconds(unsigned microsecs
)
342 struct thread
*current
;
343 struct timeout_callback tocb
;
345 current
= current_thread();
347 if (!thread_can_yield(current
))
350 if (thread_yield_timed_callback(&tocb
, microsecs
))
356 void thread_cooperate(void)
358 struct thread
*current
;
360 current
= current_thread();
363 current
->can_yield
= 1;
366 void thread_prevent_coop(void)
368 struct thread
*current
;
370 current
= current_thread();
373 current
->can_yield
= 0;