/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Task management.
 */
#include <main/uinit.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <panic.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <security/cap.h>
#include <memstr.h>
#include <print.h>
#include <lib/elf.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>
#include <console/klog.h>
#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif
/** Spinlock protecting the tasks_btree B+tree. */
SPINLOCK_INITIALIZE(tasks_lock);
/** B+tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_btree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0.
 *
 */
btree_t tasks_btree;

static task_id_t task_counter = 0;
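/*
 * Illustrative sketch (not part of the original file): a lookup that honors
 * the guarantees above, modeled on what task_kill() does below. Pinning the
 * task while tasks_lock is still held, either by taking its lock or by
 * bumping its refcount, keeps it valid after tasks_lock is dropped:
 *
 *	ipl_t ipl = interrupts_disable();
 *	spinlock_lock(&tasks_lock);
 *	task_t *ta = task_find_by_id(id);
 *	if (ta) {
 *		spinlock_lock(&ta->lock);
 *		ta->refcount++;
 *		spinlock_unlock(&ta->lock);
 *	}
 *	spinlock_unlock(&tasks_lock);
 *	interrupts_restore(ipl);
 */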
static void ktaskclnp(void *arg);
static void ktaskgc(void *arg);
/** Initialize tasks
 *
 * Initialize kernel tasks support.
 *
 */
void task_init(void)
{
    TASK = NULL;
    btree_create(&tasks_btree);
}
/** Create new task
 *
 * Create new task with no threads.
 *
 * @param as   Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
    ipl_t ipl;
    task_t *ta;
    int i;

    ta = (task_t *) malloc(sizeof(task_t), 0);

    task_create_arch(ta);

    spinlock_initialize(&ta->lock, "task_ta_lock");
    list_initialize(&ta->th_head);
    ta->as = as;
    ta->name = name;
    ta->main_thread = NULL;
    ta->refcount = 0;
    ta->context = CONTEXT;

    ta->capabilities = 0;
    ta->accept_new_threads = true;
    ta->cycles = 0;

    ipc_answerbox_init(&ta->answerbox);
    for (i = 0; i < IPC_MAX_PHONES; i++)
        ipc_phone_init(&ta->phones[i]);
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
        ta->context)))
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
    atomic_set(&ta->active_calls, 0);

    mutex_initialize(&ta->futexes_lock);
    btree_create(&ta->futexes);

    ipl = interrupts_disable();

    /*
     * Increment address space reference count.
     * TODO: Reconsider the locking scheme.
     */
    mutex_lock(&as->lock);
    as->refcount++;
    mutex_unlock(&as->lock);

    spinlock_lock(&tasks_lock);

    ta->taskid = ++task_counter;
    btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta,
        NULL);

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    return ta;
}
/** Destroy task.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
    task_destroy_arch(t);
    btree_destroy(&t->futexes);

    mutex_lock_active(&t->as->lock);
    if (--t->as->refcount == 0) {
        mutex_unlock(&t->as->lock);
        as_destroy(t->as);
        /*
         * t->as is destroyed.
         */
    } else
        mutex_unlock(&t->as->lock);

    free(t);
    TASK = NULL;
}
/** Create new task with 1 thread and run it
 *
 * @param program_addr Address of program executable image.
 * @param name         Program name.
 *
 * @return Task of the running program or NULL on error.
 */
task_t *task_run_program(void *program_addr, char *name)
{
    as_t *as;
    as_area_t *a;
    int rc;
    thread_t *t1, *t2;
    task_t *task;
    uspace_arg_t *kernel_uarg;

    as = as_create(0);
    ASSERT(as);

    rc = elf_load((elf_header_t *) program_addr, as);
    if (rc != EE_OK) {
        as_destroy(as);
        return NULL;
    }

    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    kernel_uarg->uspace_entry =
        (void *) ((elf_header_t *) program_addr)->e_entry;
    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
    kernel_uarg->uspace_thread_function = NULL;
    kernel_uarg->uspace_thread_arg = NULL;
    kernel_uarg->uspace_uarg = NULL;

    task = task_create(as, name);
    ASSERT(task);

    /*
     * Create the stack as_area.
     */
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
        AS_AREA_ATTR_NONE, &anon_backend, NULL);

    /*
     * Create the main thread.
     */
    t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
        "uinit", false);
    ASSERT(t1);

    /*
     * Create killer thread for the new task.
     */
    t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
    ASSERT(t2);
    thread_ready(t2);

    thread_ready(t1);

    return task;
}
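/*
 * Illustrative sketch (hypothetical caller, not in this file): boot code
 * that has an ELF image preloaded at some address could start it like this:
 *
 *	task_t *t = task_run_program((void *) init_addr, "init");
 *	if (!t)
 *		panic("Cannot run init task.\n");
 *
 * init_addr is an assumed name for wherever the boot loader placed the
 * image.
 */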
/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
 *                       current task ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
    /*
     * No need to acquire lock on TASK because taskid
     * remains constant for the lifespan of the task.
     */
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
        sizeof(TASK->taskid));
}
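/*
 * Illustrative sketch of the userspace counterpart (assumptions: a
 * SYS_TASK_GET_ID syscall number and a __SYSCALL1() wrapper; both names are
 * hypothetical here, not defined by this file):
 *
 *	task_id_t task_get_id(void)
 *	{
 *		task_id_t id;
 *		(void) __SYSCALL1(SYS_TASK_GET_ID, (sysarg_t) &id);
 *		return id;
 *	}
 */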
/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function
 * and interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
    btree_node_t *leaf;

    return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
}
/** Get accounting data of given task.
 *
 * Note that the task lock of 't' must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to task.
 *
 * @return Number of cycles used by the task and its counted threads so far.
 */
uint64_t task_get_accounting(task_t *t)
{
    /* Accumulated value of task */
    uint64_t ret = t->cycles;

    /* Current values of threads */
    link_t *cur;
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
        thread_t *thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        /* Process only counted threads */
        if (!thr->uncounted) {
            if (thr == THREAD) {
                /* Update accounting of current thread */
                thread_update_accounting();
            }
            ret += thr->cycles;
        }
        spinlock_unlock(&thr->lock);
    }

    return ret;
}
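/*
 * Illustrative only: the locking protocol expected from callers of
 * task_get_accounting(), as exercised by task_print_list() below:
 *
 *	ipl_t ipl = interrupts_disable();
 *	spinlock_lock(&t->lock);
 *	uint64_t cycles = task_get_accounting(t);
 *	spinlock_unlock(&t->lock);
 *	interrupts_restore(ipl);
 */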
/** Kill task.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
    ipl_t ipl;
    task_t *ta;
    thread_t *t;
    link_t *cur;

    if (id == 1)
        return EPERM;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    if (!(ta = task_find_by_id(id))) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    /* Pin the task before tasks_lock is released. */
    spinlock_lock(&ta->lock);
    ta->refcount++;
    spinlock_unlock(&ta->lock);

    btree_remove(&tasks_btree, ta->taskid, NULL);
    spinlock_unlock(&tasks_lock);

    t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);

    spinlock_lock(&ta->lock);
    ta->accept_new_threads = false;
    ta->refcount--;

    /*
     * Interrupt all threads except ktaskclnp.
     */
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        thread_t *thr;
        bool sleeping = false;

        thr = list_get_instance(cur, thread_t, th_link);
        if (thr == t)
            continue;

        spinlock_lock(&thr->lock);
        thr->interrupted = true;
        if (thr->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thr->lock);

        if (sleeping)
            waitq_interrupt_sleep(thr);
    }

    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    if (t)
        thread_ready(t);

    return 0;
}
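/*
 * Illustrative only: a hypothetical caller (e.g. a kernel console `kill'
 * command) would check the errno.h result:
 *
 *	if (task_kill(id) != 0)
 *		printf("Task %lld could not be killed.\n", id);
 */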
/** Print task list */
void task_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    printf("taskid name       ctx address    as         cycles     threads "
        "calls  callee\n");
    printf("------ ---------- --- ---------- ---------- ---------- ------- "
        "------ ------>\n");

    for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
        cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            task_t *t;
            int j;

            t = (task_t *) node->value[i];

            spinlock_lock(&t->lock);

            uint64_t cycles;
            char suffix;
            order(task_get_accounting(t), &cycles, &suffix);

            printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
                "%6zd", t->taskid, t->name, t->context, t, t->as,
                cycles, suffix, t->refcount,
                atomic_get(&t->active_calls));
            for (j = 0; j < IPC_MAX_PHONES; j++) {
                if (t->phones[j].callee)
                    printf(" %zd:%#zx", j,
                        t->phones[j].callee);
            }
            printf("\n");

            spinlock_unlock(&t->lock);
        }
    }

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
}
/** Kernel thread used to clean up the task after it is killed. */
void ktaskclnp(void *arg)
{
    ipl_t ipl;
    thread_t *t = NULL, *main_thread;
    link_t *cur;
    bool again;

    thread_detach(THREAD);

loop:
    ipl = interrupts_disable();
    spinlock_lock(&TASK->lock);

    main_thread = TASK->main_thread;

    /*
     * Find a thread to join.
     */
    again = false;
    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);
        if (t == THREAD) {
            spinlock_unlock(&t->lock);
            continue;
        } else if (t == main_thread) {
            spinlock_unlock(&t->lock);
            continue;
        } else if (t->join_type != None) {
            spinlock_unlock(&t->lock);
            again = true;
            continue;
        } else {
            t->join_type = TaskClnp;
            spinlock_unlock(&t->lock);
            again = false;
            break;
        }
    }

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    if (again) {
        /*
         * Other cleanup (e.g. ktaskgc) is in progress.
         */
        scheduler();
        goto loop;
    }

    if (t != THREAD) {
        ASSERT(t != main_thread);   /* uinit is joined and detached
                                     * in ktaskgc */
        thread_join(t);
        thread_detach(t);
        goto loop;  /* go for another thread */
    }

    /*
     * Now there are no other threads in this task
     * and no new threads can be created.
     */

    ipc_cleanup();
    futex_cleanup();
    klog_printf("Cleanup of task %lld completed.", TASK->taskid);
}
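/*
 * Note on coordination (added commentary): ktaskclnp and ktaskgc use
 * thread_t::join_type as a claim marker. A thread whose join_type is None is
 * unclaimed; setting it to TaskClnp or TaskGC under the thread's lock
 * reserves the exclusive right to thread_join() that thread, so the two
 * cleanup threads never join the same thread twice.
 */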
/** Kernel thread used to kill the userspace task when its main thread exits.
 *
 * This thread waits until the main userspace thread (i.e. uinit) exits.
 * When this happens, the task is killed. In the meantime, exited threads
 * are garbage collected.
 *
 * @param arg Pointer to the thread structure of the task's main thread.
 */
void ktaskgc(void *arg)
{
    thread_t *t = (thread_t *) arg;
loop:
    /*
     * Userspace threads cannot detach themselves,
     * therefore the thread pointer is guaranteed to be valid.
     */
    if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
        ESYNCH_TIMEOUT) {   /* sleep uninterruptibly here! */
        ipl_t ipl;
        link_t *cur;
        thread_t *thr = NULL;

        /*
         * The join timed out. Try to do some garbage collection of
         * Undead threads.
         */
more_gc:
        ipl = interrupts_disable();
        spinlock_lock(&TASK->lock);

        for (cur = TASK->th_head.next; cur != &TASK->th_head;
            cur = cur->next) {
            thr = list_get_instance(cur, thread_t, th_link);
            spinlock_lock(&thr->lock);
            if (thr != t && thr->state == Undead &&
                thr->join_type == None) {
                thr->join_type = TaskGC;
                spinlock_unlock(&thr->lock);
                break;
            }
            spinlock_unlock(&thr->lock);
            thr = NULL;
        }
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);

        if (thr != NULL) {
            /* Claimed an Undead thread; reap it and look for more. */
            thread_join(thr);
            thread_detach(thr);
            goto more_gc;
        }

        goto loop;
    }
    thread_detach(t);
    task_kill(TASK->taskid);
}

/** @}
 */