kernel/generic/include/proc/thread.h

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */
/** @file
 */

#ifndef KERN_THREAD_H_
#define KERN_THREAD_H_

#include <synch/waitq.h>
#include <proc/task.h>
#include <time/timeout.h>
#include <cpu.h>
#include <synch/rwlock.h>
#include <adt/btree.h>
#include <mm/slab.h>
#include <arch/cpu.h>
#include <mm/tlb.h>
#include <proc/uarg.h>

#define THREAD_STACK_SIZE   STACK_SIZE
#define THREAD_NAME_BUFLEN  20

extern char *thread_states[];

/* Thread flags */

/** Thread cannot be migrated to another CPU. */
#define THREAD_FLAG_WIRED   (1 << 0)
/** Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_STOLEN  (1 << 1)
/** Thread executes in userspace. */
#define THREAD_FLAG_USPACE  (1 << 2)

/** Thread states. */
typedef enum {
    /** It is an error if a thread is found in this state. */
    Invalid,
    /** State of a thread that is currently executing on some CPU. */
    Running,
    /** A thread in this state is waiting for an event. */
    Sleeping,
    /** State of threads in a run queue. */
    Ready,
    /** Threads are in this state before they are first readied. */
    Entering,
    /** After a thread calls thread_exit(), it is put into the Exiting state. */
    Exiting,
    /** Threads that were not detached but exited are in the Undead state. */
    Undead
} state_t;

/** Join types. */
typedef enum {
    None,
    TaskClnp,   /**< The thread will be joined by ktaskclnp thread. */
    TaskGC      /**< The thread will be joined by ktaskgc thread. */
} thread_join_type_t;

/** Thread structure. There is one per thread. */
typedef struct thread {
    link_t rq_link;  /**< Run queue link. */
    link_t wq_link;  /**< Wait queue link. */
    link_t th_link;  /**< Links to threads within containing task. */

    /** Lock protecting thread structure.
     *
     * Protects the whole thread structure except list links above.
     */
    SPINLOCK_DECLARE(lock);

    char name[THREAD_NAME_BUFLEN];

    /** Function implementing the thread. */
    void (* thread_code)(void *);
    /** Argument passed to thread_code() function. */
    void *thread_arg;

    /**
     * From here, the stored context is restored when the thread is
     * scheduled.
     */
    context_t saved_context;
    /**
     * From here, the stored timeout context is restored when sleep times
     * out.
     */
    context_t sleep_timeout_context;
    /**
     * From here, the stored interruption context is restored when sleep is
     * interrupted.
     */
    context_t sleep_interruption_context;

    /** If true, the thread can be interrupted from sleep. */
    bool sleep_interruptible;
    /** Wait queue in which this thread sleeps. */
    waitq_t *sleep_queue;
    /** Timeout used for timeoutable sleeping. */
    timeout_t sleep_timeout;
    /** Flag signalling sleep timeout in progress. */
    volatile int timeout_pending;

    /**
     * True if this thread is executing copy_from_uspace().
     * False otherwise.
     */
    bool in_copy_from_uspace;
    /**
     * True if this thread is executing copy_to_uspace().
     * False otherwise.
     */
    bool in_copy_to_uspace;

    /**
     * If true, the thread will not go to sleep at all and will call
     * thread_exit() before returning to userspace.
     */
    bool interrupted;

    /** Who joins the thread. */
    thread_join_type_t join_type;
    /** If true, thread_join_timeout() cannot be used on this thread. */
    bool detached;
    /** Waitq for thread_join_timeout(). */
    waitq_t join_wq;

    fpu_context_t *saved_fpu_context;
    int fpu_context_exists;

    /**
     * Defined only if the thread doesn't run.
     * It means that the FPU context is in the CPU that last executed this
     * thread. This disables migration.
     */
    int fpu_context_engaged;

    rwlock_type_t rwlock_holder_type;

    /** Callback fired in scheduler before the thread is put asleep. */
    void (* call_me)(void *);
    /** Argument passed to call_me(). */
    void *call_me_with;

    /** Thread's state. */
    state_t state;
    /** Thread's flags. */
    int flags;

    /** Thread's CPU. */
    cpu_t *cpu;
    /** Containing task. */
    task_t *task;

    /** Ticks before preemption. */
    uint64_t ticks;

    /** Thread accounting. */
    uint64_t cycles;
    /** Last sampled cycle. */
    uint64_t last_cycle;
    /** Thread doesn't affect accumulated accounting. */
    bool uncounted;

    /** Thread's priority. Implemented as index to CPU->rq */
    int priority;
    /** Thread ID. */
    uint32_t tid;

    /** Architecture-specific data. */
    thread_arch_t arch;

    /** Thread's kernel stack. */
    uint8_t *kstack;
} thread_t;

/** Thread list lock.
 *
 * This lock protects all link_t structures chained in threads_head.
 * Must be acquired before T.lock for each T of type thread_t.
 */
SPINLOCK_EXTERN(threads_lock);

/** B+tree containing all threads. */
extern btree_t threads_btree;

extern void thread_init(void);
extern thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
    int flags, char *name, bool uncounted);
extern void thread_ready(thread_t *t);
extern void thread_exit(void) __attribute__((noreturn));

#ifndef thread_create_arch
extern void thread_create_arch(thread_t *t);
#endif
#ifndef thr_constructor_arch
extern void thr_constructor_arch(thread_t *t);
#endif
#ifndef thr_destructor_arch
extern void thr_destructor_arch(thread_t *t);
#endif

extern void thread_sleep(uint32_t sec);
extern void thread_usleep(uint32_t usec);

#define thread_join(t) \
    thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
extern int thread_join_timeout(thread_t *t, uint32_t usec, int flags);
extern void thread_detach(thread_t *t);

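/*
 * Illustrative sketch (not part of the original header): one way the
 * declarations above could be combined to spawn a joinable kernel thread and
 * wait for it. The worker function, its argument, the flags value and the use
 * of TASK as the creating task are assumptions made only for this example.
 *
 *     static void example_worker(void *arg)
 *     {
 *         // ... perform work ...
 *         thread_exit();
 *     }
 *
 *     thread_t *t = thread_create(example_worker, NULL, TASK, 0,
 *         "example_worker", false);
 *     if (t) {
 *         thread_ready(t);    // place the new thread into a run queue
 *         thread_join(t);     // wait until it exits (becomes Undead)
 *         thread_detach(t);   // allow the undead thread to be destroyed
 *     }
 */
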
extern void thread_register_call_me(void (* call_me)(void *),
    void *call_me_with);
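
/*
 * Illustrative sketch (an assumption, not from the original header):
 * registering a callback that the scheduler fires before the current thread
 * is put asleep, matching the call_me/call_me_with fields documented in
 * thread_t above. The callback name and its argument are hypothetical.
 *
 *     static void example_before_sleep(void *arg)
 *     {
 *         // runs in the scheduler before this thread goes to sleep
 *     }
 *
 *     thread_register_call_me(example_before_sleep, NULL);
 */
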
extern void thread_print_list(void);
extern void thread_destroy(thread_t *t);
extern void thread_update_accounting(void);
extern bool thread_exists(thread_t *t);
extern void thread_interrupt_sleep(thread_t *t);

/** Fpu context slab cache. */
extern slab_cache_t *fpu_context_slab;

/* Thread syscall prototypes. */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name);
unative_t sys_thread_exit(int uspace_status);

#endif

/** @}
 */