Revert thread_interrupt_sleep() to waitq_interrupt_sleep().
kernel/generic/src/proc/task.c
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Task management.
 */
#include <main/uinit.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <panic.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <security/cap.h>
#include <memstr.h>
#include <print.h>
#include <lib/elf.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>
#include <console/klog.h>

#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif
/** Spinlock protecting the tasks_btree B+tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** B+tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_btree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0
 */
btree_t tasks_btree;

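/** Counter used to assign unique task IDs; incremented under tasks_lock. */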
static task_id_t task_counter = 0;

static void ktaskclnp(void *arg);
static void ktaskgc(void *arg);
/** Initialize tasks
 *
 * Initialize kernel tasks support.
 *
 */
void task_init(void)
{
	TASK = NULL;
	btree_create(&tasks_btree);
}
/** Create new task
 *
 * Create new task with no threads.
 *
 * @param as Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;

	ta = (task_t *) malloc(sizeof(task_t), 0);

	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;
	ta->name = name;
	ta->main_thread = NULL;
	ta->refcount = 0;
	ta->context = CONTEXT;

	ta->capabilities = 0;
	ta->accept_new_threads = true;
	ta->cycles = 0;
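
	/*
	 * Initialize IPC: the answerbox and the phone array, and, if the
	 * contexts are compatible, connect phone 0 to the answerbox
	 * registered as ipc_phone_0.
	 */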
	ipc_answerbox_init(&ta->answerbox);
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_init(&ta->phones[i]);
	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
	    ta->context)))
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock);
	btree_create(&ta->futexes);

	ipl = interrupts_disable();

	/*
	 * Increment address space reference count.
	 * TODO: Reconsider the locking scheme.
	 */
	mutex_lock(&as->lock);
	as->refcount++;
	mutex_unlock(&as->lock);

	spinlock_lock(&tasks_lock);

	ta->taskid = ++task_counter;
	btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}
/** Destroy task.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	task_destroy_arch(t);
	btree_destroy(&t->futexes);

	mutex_lock_active(&t->as->lock);
	if (--t->as->refcount == 0) {
		mutex_unlock(&t->as->lock);
		as_destroy(t->as);
		/*
		 * t->as is destroyed.
		 */
	} else
		mutex_unlock(&t->as->lock);

	free(t);
	TASK = NULL;
}
/** Create new task with 1 thread and run it
 *
 * @param program_addr Address of program executable image.
 * @param name Program name.
 *
 * @return Task of the running program or NULL on error.
 */
task_t *task_run_program(void *program_addr, char *name)
{
	as_t *as;
	as_area_t *a;
	int rc;
	thread_t *t1, *t2;
	task_t *task;
	uspace_arg_t *kernel_uarg;

	as = as_create(0);
	ASSERT(as);

	rc = elf_load((elf_header_t *) program_addr, as);
	if (rc != EE_OK) {
		as_destroy(as);
		return NULL;
	}
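
	/*
	 * Prepare arguments for the uinit() kernel thread: it will pass
	 * control to userspace at the image's ELF entry point, with the
	 * stack placed at USTACK_ADDRESS.
	 */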
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	kernel_uarg->uspace_entry =
	    (void *) ((elf_header_t *) program_addr)->e_entry;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;

	task = task_create(as, name);
	ASSERT(task);

	/*
	 * Create the stack as_area.
	 */
	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL);

	/*
	 * Create the main thread.
	 */
	t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
	    "uinit", false);
	ASSERT(t1);

	/*
	 * Create killer thread for the new task.
	 */
	t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
	ASSERT(t2);
	thread_ready(t2);

	thread_ready(t1);

	return task;
}
/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
 * current task ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
	/*
	 * No need to acquire lock on TASK because taskid
	 * remains constant for the lifespan of the task.
	 */
	return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
	    sizeof(TASK->taskid));
}
/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function
 * and interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
	btree_node_t *leaf;

	return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
}
/** Get accounting data of given task.
 *
 * Note that task lock of 't' must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to the task.
 *
 */
uint64_t task_get_accounting(task_t *t)
{
	/* Accumulated value of task */
	uint64_t ret = t->cycles;

	/* Current values of threads */
	link_t *cur;
	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
		thread_t *thr = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&thr->lock);
		/* Process only counted threads */
		if (!thr->uncounted) {
			if (thr == THREAD) {
				/* Update accounting of current thread */
				thread_update_accounting();
			}
			ret += thr->cycles;
		}
		spinlock_unlock(&thr->lock);
	}

	return ret;
}
/** Kill task.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h
 */
int task_kill(task_id_t id)
{
	ipl_t ipl;
	task_t *ta;
	thread_t *t;
	link_t *cur;

	if (id == 1)
		return EPERM;

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	if (!(ta = task_find_by_id(id))) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
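
	/*
	 * Pin the task with an extra reference so that it cannot be destroyed
	 * once tasks_lock is released.
	 */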
	spinlock_lock(&ta->lock);
	ta->refcount++;
	spinlock_unlock(&ta->lock);

	btree_remove(&tasks_btree, ta->taskid, NULL);
	spinlock_unlock(&tasks_lock);

	t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);

	spinlock_lock(&ta->lock);
	ta->accept_new_threads = false;
	ta->refcount--;

	/*
	 * Interrupt all threads except ktaskclnp.
	 */
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;

		thr = list_get_instance(cur, thread_t, th_link);
		if (thr == t)
			continue;

		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);
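
		/*
		 * Wake a sleeping thread out of its wait queue so that it
		 * notices thr->interrupted.
		 */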
		if (sleeping)
			waitq_interrupt_sleep(thr);
	}

	spinlock_unlock(&ta->lock);
	interrupts_restore(ipl);

	if (t)
		thread_ready(t);

	return 0;
}
/** Print task list */
void task_print_list(void)
{
	link_t *cur;
	ipl_t ipl;

	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	printf("taskid name       ctx address    as         cycles     threads "
	    "calls  callee\n");
	printf("------ ---------- --- ---------- ---------- ---------- ------- "
	    "------ ------>\n");

	for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
	    cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			task_t *t;
			int j;

			t = (task_t *) node->value[i];

			spinlock_lock(&t->lock);
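
			/*
			 * Split the accumulated cycle count into a value and
			 * a unit suffix for printing.
			 */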
			uint64_t cycles;
			char suffix;
			order(task_get_accounting(t), &cycles, &suffix);

			printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
			    "%6zd", t->taskid, t->name, t->context, t, t->as,
			    cycles, suffix, t->refcount,
			    atomic_get(&t->active_calls));
			for (j = 0; j < IPC_MAX_PHONES; j++) {
				if (t->phones[j].callee)
					printf(" %zd:%#zx", j,
					    t->phones[j].callee);
			}
			printf("\n");

			spinlock_unlock(&t->lock);
		}
	}

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}
/** Kernel thread used to clean up the task after it is killed. */
void ktaskclnp(void *arg)
{
	ipl_t ipl;
	thread_t *t = NULL, *main_thread;
	link_t *cur;
	bool again;

	thread_detach(THREAD);

loop:
	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	main_thread = TASK->main_thread;

	/*
	 * Find a thread to join.
	 */
	again = false;
	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&t->lock);
		if (t == THREAD) {
			spinlock_unlock(&t->lock);
			continue;
		} else if (t == main_thread) {
			spinlock_unlock(&t->lock);
			continue;
		} else if (t->join_type != None) {
			spinlock_unlock(&t->lock);
			again = true;
			continue;
		} else {
			t->join_type = TaskClnp;
			spinlock_unlock(&t->lock);
			again = false;
			break;
		}
	}

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	if (again) {
		/*
		 * Other cleanup (e.g. ktaskgc) is in progress.
		 */
		scheduler();
		goto loop;
	}

	if (t != THREAD) {
		ASSERT(t != main_thread);	/* uninit is joined and detached
						 * in ktaskgc */
		thread_join(t);
		thread_detach(t);
		goto loop;	/* go for another thread */
	}

	/*
	 * Now there are no other threads in this task
	 * and no new threads can be created.
	 */

	ipc_cleanup();
	futex_cleanup();
	klog_printf("Cleanup of task %lld completed.", TASK->taskid);
}
/** Kernel thread used to kill the userspace task when its main thread exits.
 *
 * This thread waits until the main userspace thread (i.e. uninit) exits.
 * When this happens, the task is killed. In the meantime, exited threads
 * are garbage collected.
 *
 * @param arg Pointer to the thread structure of the task's main thread.
 */
void ktaskgc(void *arg)
{
	thread_t *t = (thread_t *) arg;
loop:
	/*
	 * Userspace threads cannot detach themselves,
	 * therefore the thread pointer is guaranteed to be valid.
	 */
	if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
	    ESYNCH_TIMEOUT) {	/* sleep uninterruptibly here! */
		ipl_t ipl;
		link_t *cur;
		thread_t *thr = NULL;

		/*
		 * The join timed out. Try to do some garbage collection of
		 * Undead threads.
		 */
more_gc:
		ipl = interrupts_disable();
		spinlock_lock(&TASK->lock);

		for (cur = TASK->th_head.next; cur != &TASK->th_head;
		    cur = cur->next) {
			thr = list_get_instance(cur, thread_t, th_link);
			spinlock_lock(&thr->lock);
			if (thr != t && thr->state == Undead &&
			    thr->join_type == None) {
				thr->join_type = TaskGC;
				spinlock_unlock(&thr->lock);
				break;
			}
			spinlock_unlock(&thr->lock);
			thr = NULL;
		}
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);

		if (thr) {
			thread_join(thr);
			thread_detach(thr);
			scheduler();
			goto more_gc;
		}

		goto loop;
	}
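
	/*
	 * thread_join_timeout() did not time out: the main thread has exited.
	 * Detach it and kill the whole task.
	 */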
	thread_detach(t);
	task_kill(TASK->taskid);
}

/** @}
 */