2 * Copyright (c) 2001-2004 Jakub Jermar
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 /** @addtogroup genericproc
35 * @brief Thread management functions.
38 #include <proc/scheduler.h>
39 #include <proc/thread.h>
40 #include <proc/task.h>
41 #include <proc/uarg.h>
45 #include <arch/cycle.h>
47 #include <synch/synch.h>
48 #include <synch/spinlock.h>
49 #include <synch/waitq.h>
50 #include <synch/rwlock.h>
54 #include <adt/btree.h>
57 #include <time/clock.h>
59 #include <arch/interrupt.h>
61 #include <arch/faddr.h>
67 #include <main/uinit.h>
68 #include <syscall/copy.h>
/* Human-readable names for thread states, indexed by thread state value. */
73 char *thread_states
[] = {
83 /** Lock protecting the threads_btree B+tree.
85 * For locking rules, see declaration thereof.
87 SPINLOCK_INITIALIZE(threads_lock
);
89 /** B+tree of all threads.
91 * When a thread is found in the threads_btree B+tree, it is guaranteed to
92 * exist as long as the threads_lock is held.
94 btree_t threads_btree
;
/* NOTE(review): tidlock presumably serializes thread-ID generation via
 * last_tid (used in thread_create()) -- confirm against the full source. */
96 SPINLOCK_INITIALIZE(tidlock
);
97 uint32_t last_tid
= 0;
/* Slab cache for thread_t structures (see thr_constructor/thr_destructor). */
99 static slab_cache_t
*thread_slab
;
/* Slab cache for saved FPU contexts; shared with architecture code. */
101 slab_cache_t
*fpu_context_slab
;
106 * This wrapper is provided to ensure that every thread makes a call to
107 * thread_exit() when its implementing function returns.
109 * interrupts_disable() is assumed.
112 static void cushion(void)
/* Snapshot the thread's entry point and argument from the global THREAD
 * while its lock is still held. */
114 void (*f
)(void *) = THREAD
->thread_code
;
115 void *arg
= THREAD
->thread_arg
;
/* Mark the beginning of this thread's first accounting interval. */
116 THREAD
->last_cycle
= get_cycle();
118 /* This is where each thread wakes up after its creation */
119 spinlock_unlock(&THREAD
->lock
);
124 /* Accumulate accounting to the task */
/* Interrupts stay disabled across the THREAD/TASK lock manipulation below. */
125 ipl_t ipl
= interrupts_disable();
127 spinlock_lock(&THREAD
->lock
);
128 if (!THREAD
->uncounted
) {
/* Fold the not-yet-accounted cycles into THREAD->cycles first. */
129 thread_update_accounting();
130 uint64_t cycles
= THREAD
->cycles
;
/* NOTE(review): the thread lock is dropped before taking the task lock --
 * presumably to respect lock ordering; confirm against locking rules. */
132 spinlock_unlock(&THREAD
->lock
);
134 spinlock_lock(&TASK
->lock
);
135 TASK
->cycles
+= cycles
;
136 spinlock_unlock(&TASK
->lock
);
138 spinlock_unlock(&THREAD
->lock
);
140 interrupts_restore(ipl
);
146 /** Initialization and allocation for thread_t structure */
147 static int thr_constructor(void *obj
, int kmflags
)
149 thread_t
*t
= (thread_t
*) obj
;
/* Initialize the per-thread lock and the three list links (run queue,
 * wait queue, task thread list). */
151 spinlock_initialize(&t
->lock
, "thread_t_lock");
152 link_initialize(&t
->rq_link
);
153 link_initialize(&t
->wq_link
);
154 link_initialize(&t
->th_link
);
156 /* call the architecture-specific part of the constructor */
157 thr_constructor_arch(t
);
/* With lazy FPU switching the context is allocated on demand;
 * otherwise allocate it eagerly from fpu_context_slab. */
160 # ifdef CONFIG_FPU_LAZY
161 t
->saved_fpu_context
= NULL
;
163 t
->saved_fpu_context
= slab_alloc(fpu_context_slab
,kmflags
);
164 if (!t
->saved_fpu_context
)
/* Allocate the kernel stack; kmflags are forwarded to the frame allocator. */
169 t
->kstack
= frame_alloc(STACK_FRAMES
, FRAME_KA
| kmflags
);
/* On stack-allocation failure, release the eagerly allocated FPU context. */
172 if (t
->saved_fpu_context
)
173 slab_free(fpu_context_slab
,t
->saved_fpu_context
);
181 /** Destruction of thread_t object */
182 static int thr_destructor(void *obj
)
184 thread_t
*t
= (thread_t
*) obj
;
186 /* call the architecture-specific part of the destructor */
187 thr_destructor_arch(t
);
/* Release the kernel-stack frames (kstack is a kernel-address pointer). */
189 frame_free(KA2PA(t
->kstack
));
/* Free the saved FPU context, if one was allocated. */
191 if (t
->saved_fpu_context
)
192 slab_free(fpu_context_slab
,t
->saved_fpu_context
);
194 return 1; /* One page freed */
197 /** Initialize threads
199 * Initialize kernel threads support.
202 void thread_init(void)
/* Slab cache for thread_t objects, wired to the ctor/dtor above. */
206 thread_slab
= slab_cache_create("thread_slab", sizeof(thread_t
), 0,
207 thr_constructor
, thr_destructor
, 0);
/* Slab cache for FPU contexts; alignment required by the FPU, no ctor/dtor. */
210 fpu_context_slab
= slab_cache_create("fpu_slab", sizeof(fpu_context_t
),
211 FPU_CONTEXT_ALIGN
, NULL
, NULL
, 0);
/* Create the system-wide thread registry. */
214 btree_create(&threads_btree
);
217 /** Make thread ready
219 * Switch thread t to the ready state.
221 * @param t Thread to make ready.
224 void thread_ready(thread_t
*t
)
231 ipl
= interrupts_disable();
233 spinlock_lock(&t
->lock
);
235 ASSERT(! (t
->state
== Ready
));
/* Demote the thread by one run queue (larger index = lower priority),
 * saturating at RQ_COUNT - 1. */
237 i
= (t
->priority
< RQ_COUNT
- 1) ? ++t
->priority
: t
->priority
;
/* NOTE(review): THREAD_FLAG_WIRED presumably pins the thread to a specific
 * CPU; the branch body is not visible here -- confirm. */
240 if (t
->flags
& THREAD_FLAG_WIRED
) {
244 spinlock_unlock(&t
->lock
);
247 * Append t to respective ready queue on respective processor.
250 spinlock_lock(&r
->lock
);
251 list_append(&t
->rq_link
, &r
->rq_head
);
253 spinlock_unlock(&r
->lock
);
/* Update the ready-thread statistics for the chosen CPU. */
256 avg
= atomic_get(&nrdy
) / config
.cpu_active
;
257 atomic_inc(&cpu
->nrdy
);
259 interrupts_restore(ipl
);
262 /** Destroy thread memory structure
264 * Detach thread from all queues, cpus etc. and destroy it.
266 * Assume thread->lock is held!!
268 void thread_destroy(thread_t
*t
)
270 bool destroy_task
= false;
/* Only threads that have finished running may be destroyed. */
272 ASSERT(t
->state
== Exiting
|| t
->state
== Undead
);
/* If this thread owns its CPU's FPU state, forget that ownership. */
276 spinlock_lock(&t
->cpu
->lock
);
277 if(t
->cpu
->fpu_owner
== t
)
278 t
->cpu
->fpu_owner
= NULL
;
279 spinlock_unlock(&t
->cpu
->lock
);
281 spinlock_unlock(&t
->lock
);
/* Remove the thread from the system-wide registry, keyed by its address. */
283 spinlock_lock(&threads_lock
);
284 btree_remove(&threads_btree
, (btree_key_t
) ((uintptr_t ) t
), NULL
);
285 spinlock_unlock(&threads_lock
);
288 * Detach from the containing task.
290 spinlock_lock(&t
->task
->lock
);
291 list_remove(&t
->th_link
);
/* Last thread out: stop the task from accepting new threads. */
292 if (--t
->task
->refcount
== 0) {
293 t
->task
->accept_new_threads
= false;
296 spinlock_unlock(&t
->task
->lock
);
299 task_destroy(t
->task
);
/* Return the thread_t to its slab cache. */
301 slab_free(thread_slab
, t
);
304 /** Create new thread
306 * Create a new thread.
308 * @param func Thread's implementing function.
309 * @param arg Thread's implementing function argument.
310 * @param task Task to which the thread belongs.
311 * @param flags Thread flags.
312 * @param name Symbolic name.
313 * @param uncounted Thread's accounting doesn't affect accumulated task
316 * @return New thread's structure on success, NULL on failure.
319 thread_t
*thread_create(void (* func
)(void *), void *arg
, task_t
*task
,
320 int flags
, char *name
, bool uncounted
)
/* Allocate a pre-constructed thread_t (ctor already ran in the slab). */
325 t
= (thread_t
*) slab_alloc(thread_slab
, 0);
329 /* Not needed, but good for debugging */
330 memsetb((uintptr_t) t
->kstack
, THREAD_STACK_SIZE
* 1 << STACK_FRAMES
,
/* NOTE(review): a fresh tid is presumably assigned from last_tid under
 * tidlock here -- the assignment itself is not visible; confirm. */
333 ipl
= interrupts_disable();
334 spinlock_lock(&tidlock
);
336 spinlock_unlock(&tidlock
);
337 interrupts_restore(ipl
);
/* Prepare the initial saved context so the first switch to this thread
 * lands in cushion() on the new kernel stack. */
339 context_save(&t
->saved_context
);
340 context_set(&t
->saved_context
, FADDR(cushion
), (uintptr_t) t
->kstack
,
343 the_initialize((the_t
*) t
->kstack
);
/* Record the current interrupt-priority level into the new context. */
345 ipl
= interrupts_disable();
346 t
->saved_context
.ipl
= interrupts_read();
347 interrupts_restore(ipl
);
349 memcpy(t
->name
, name
, THREAD_NAME_BUFLEN
);
351 t
->thread_code
= func
;
355 t
->uncounted
= uncounted
;
356 t
->priority
= -1; /* start in rq[0] */
361 t
->call_me_with
= NULL
;
/* Initialize sleep/timeout bookkeeping to the "not sleeping" state. */
363 timeout_initialize(&t
->sleep_timeout
);
364 t
->sleep_interruptible
= false;
365 t
->sleep_queue
= NULL
;
366 t
->timeout_pending
= 0;
368 t
->in_copy_from_uspace
= false;
369 t
->in_copy_to_uspace
= false;
371 t
->interrupted
= false;
374 waitq_initialize(&t
->join_wq
);
376 t
->rwlock_holder_type
= RWLOCK_NONE
;
380 t
->fpu_context_exists
= 0;
381 t
->fpu_context_engaged
= 0;
383 /* might depend on previous initialization */
384 thread_create_arch(t
);
387 * Attach to the containing task.
389 ipl
= interrupts_disable();
390 spinlock_lock(&task
->lock
);
/* Task is shutting down: undo the allocation and bail out. */
391 if (!task
->accept_new_threads
) {
392 spinlock_unlock(&task
->lock
);
393 slab_free(thread_slab
, t
);
394 interrupts_restore(ipl
);
397 list_append(&t
->th_link
, &task
->th_head
);
/* The first thread attached to a task becomes its main thread. */
398 if (task
->refcount
++ == 0)
399 task
->main_thread
= t
;
400 spinlock_unlock(&task
->lock
);
403 * Register this thread in the system-wide list.
405 spinlock_lock(&threads_lock
);
406 btree_insert(&threads_btree
, (btree_key_t
) ((uintptr_t) t
), (void *) t
,
408 spinlock_unlock(&threads_lock
);
410 interrupts_restore(ipl
);
415 /** Terminate thread.
417 * End current thread execution and switch it to the exiting state. All pending
418 * timeouts are executed.
420 void thread_exit(void)
425 ipl
= interrupts_disable();
426 spinlock_lock(&THREAD
->lock
);
/* A timeout handler may still be running against this thread; back off
 * and retry rather than exiting underneath it. */
427 if (THREAD
->timeout_pending
) {
428 /* busy waiting for timeouts in progress */
429 spinlock_unlock(&THREAD
->lock
);
430 interrupts_restore(ipl
);
/* No pending timeouts: transition to Exiting. */
433 THREAD
->state
= Exiting
;
434 spinlock_unlock(&THREAD
->lock
);
445 * Suspend execution of the current thread.
447 * @param sec Number of seconds to sleep.
/* NOTE(review): sec * 1000000 is computed in uint32_t and overflows for
 * sec > ~4294 -- confirm callers stay within that range. */
450 void thread_sleep(uint32_t sec
)
452 thread_usleep(sec
* 1000000);
455 /** Wait for another thread to exit.
457 * @param t Thread to join on exit.
458 * @param usec Timeout in microseconds.
459 * @param flags Mode of operation.
461 * @return An error code from errno.h or an error code from synch.h.
463 int thread_join_timeout(thread_t
*t
, uint32_t usec
, int flags
)
472 * Since thread join can only be called once on an undetached thread,
473 * the thread pointer is guaranteed to be still valid.
/* Sanity-check (under the thread lock) that nobody detached t. */
476 ipl
= interrupts_disable();
477 spinlock_lock(&t
->lock
);
478 ASSERT(!t
->detached
);
479 spinlock_unlock(&t
->lock
);
480 interrupts_restore(ipl
);
/* Block on the thread's join wait queue until it exits or the timeout
 * expires; the result code is propagated from waitq_sleep_timeout(). */
482 rc
= waitq_sleep_timeout(&t
->join_wq
, usec
, flags
);
489 * Mark the thread as detached, if the thread is already in the Undead state,
490 * deallocate its resources.
492 * @param t Thread to be detached.
494 void thread_detach(thread_t
*t
)
499 * Since the thread is expected to not be already detached,
500 * pointer to it must be still valid.
502 ipl
= interrupts_disable();
503 spinlock_lock(&t
->lock
);
504 ASSERT(!t
->detached
);
/* Already finished: reclaim its resources right away. */
505 if (t
->state
== Undead
) {
506 thread_destroy(t
); /* unlocks &t->lock */
507 interrupts_restore(ipl
);
512 spinlock_unlock(&t
->lock
);
513 interrupts_restore(ipl
);
518 * Suspend execution of the current thread.
520 * @param usec Number of microseconds to sleep.
523 void thread_usleep(uint32_t usec
)
/* Sleep on a private, never-signalled wait queue so the timeout is the
 * only wakeup source; the return value is deliberately ignored. */
527 waitq_initialize(&wq
);
529 (void) waitq_sleep_timeout(&wq
, usec
, SYNCH_FLAGS_NON_BLOCKING
);
532 /** Register thread out-of-context invocation
534 * Register a function and its argument to be executed
535 * on next context switch to the current thread.
537 * @param call_me Out-of-context function.
538 * @param call_me_with Out-of-context function argument.
541 void thread_register_call_me(void (* call_me
)(void *), void *call_me_with
)
/* Publish the callback pair atomically with respect to THREAD's lock. */
545 ipl
= interrupts_disable();
546 spinlock_lock(&THREAD
->lock
);
547 THREAD
->call_me
= call_me
;
548 THREAD
->call_me_with
= call_me_with
;
549 spinlock_unlock(&THREAD
->lock
);
550 interrupts_restore(ipl
);
553 /** Print list of threads debug info */
554 void thread_print_list(void)
559 /* Messing with thread structures, avoid deadlock */
560 ipl
= interrupts_disable();
561 spinlock_lock(&threads_lock
);
563 printf("tid name address state task ctx code stack cycles cpu kstack waitqueue\n");
564 printf("------ ---------- ---------- -------- ---------- --- ---------- ---------- ---------- ---- ---------- ----------\n");
/* Walk the B+tree leaf-node list; every key in every leaf is one thread. */
566 for (cur
= threads_btree
.leaf_head
.next
; cur
!= &threads_btree
.leaf_head
; cur
= cur
->next
) {
570 node
= list_get_instance(cur
, btree_node_t
, leaf_link
);
571 for (i
= 0; i
< node
->keys
; i
++) {
574 t
= (thread_t
*) node
->value
[i
];
/* Scale the cycle count to a human-readable magnitude + suffix. */
578 order(t
->cycles
, &cycles
, &suffix
);
580 printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ", t
->tid
, t
->name
, t
, thread_states
[t
->state
], t
->task
, t
->task
->context
, t
->thread_code
, t
->kstack
, cycles
, suffix
);
583 printf("%-4zd", t
->cpu
->id
);
/* Sleeping threads additionally show their kernel stack and wait queue. */
587 if (t
->state
== Sleeping
)
588 printf(" %#10zx %#10zx", t
->kstack
, t
->sleep_queue
);
594 spinlock_unlock(&threads_lock
);
595 interrupts_restore(ipl
);
598 /** Check whether thread exists.
600 * Note that threads_lock must be already held and
601 * interrupts must be already disabled.
603 * @param t Pointer to thread.
605 * @return True if thread t is known to the system, false otherwise.
607 bool thread_exists(thread_t
*t
)
/* The registry is keyed by the thread's address, so a successful lookup
 * means t is a live, registered thread. */
611 return btree_search(&threads_btree
, (btree_key_t
) ((uintptr_t) t
), &leaf
) != NULL
;
615 /** Update accounting of current thread.
617 * Note that thread_lock on THREAD must be already held and
618 * interrupts must be already disabled.
621 void thread_update_accounting(void)
/* Charge the cycles elapsed since last_cycle to this thread and restart
 * the accounting interval at the current cycle counter. */
623 uint64_t time
= get_cycle();
624 THREAD
->cycles
+= time
- THREAD
->last_cycle
;
625 THREAD
->last_cycle
= time
;
628 /** Process syscall to create new thread.
631 unative_t
sys_thread_create(uspace_arg_t
*uspace_uarg
, char *uspace_name
)
634 char namebuf
[THREAD_NAME_BUFLEN
];
635 uspace_arg_t
*kernel_uarg
;
/* Copy the requested thread name in from userspace; propagate failure. */
639 rc
= copy_from_uspace(namebuf
, uspace_name
, THREAD_NAME_BUFLEN
);
641 return (unative_t
) rc
;
/* Copy the userspace argument block into a kernel-side buffer that the
 * new thread (uinit) will consume. */
643 kernel_uarg
= (uspace_arg_t
*) malloc(sizeof(uspace_arg_t
), 0);
644 rc
= copy_from_uspace(kernel_uarg
, uspace_uarg
, sizeof(uspace_arg_t
));
647 return (unative_t
) rc
;
/* Create a userspace thread in the calling task, running uinit() on the
 * copied arguments; accounting is counted (uncounted = false). */
650 if ((t
= thread_create(uinit
, kernel_uarg
, TASK
, THREAD_FLAG_USPACE
, namebuf
, false))) {
653 return (unative_t
) tid
;
/* thread_create() failed: report out-of-memory. */
658 return (unative_t
) ENOMEM
;
661 /** Process syscall to terminate thread.
664 unative_t
sys_thread_exit(int uspace_status
)