/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/debug.h>
#include <kernel/console.h>
#include <kernel/thread.h>
#include <kernel/arch/thread.h>
#include <kernel/khash.h>
#include <kernel/int.h>
#include <kernel/smp.h>
#include <kernel/timer.h>
#include <kernel/cpu.h>
#include <kernel/arch/cpu.h>
#include <kernel/arch/int.h>
#include <kernel/arch/vm.h>
#include <kernel/sem.h>
#include <kernel/port.h>
#include <kernel/vfs.h>
#include <kernel/elf.h>
#include <kernel/heap.h>
#include <newos/user_runtime.h>
#include <newos/errors.h>
#include <boot/stage2.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>

struct proc_key {
	proc_id id;
};

struct thread_key {
	thread_id id;
};

struct proc_arg {
	char *path;
	char **args;
	unsigned int argc;
};

static struct proc *create_proc_struct(const char *name, bool kernel);
static int proc_struct_compare(void *_p, const void *_key);
static unsigned int proc_struct_hash(void *_p, const void *_key, unsigned int range);

// global
spinlock_t thread_spinlock = 0;

// proc list
static void *proc_hash = NULL;
static struct proc *kernel_proc = NULL;
static proc_id next_proc_id = 1;
static spinlock_t proc_spinlock = 0;
// NOTE: PROC lock can be held over a THREAD lock acquisition,
// but not the other way (to avoid deadlock)
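// e.g. it is safe to GRAB_PROC_LOCK() and then GRAB_THREAD_LOCK(), but a
// thread-lock holder must never try to take the proc lock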
#define GRAB_PROC_LOCK() acquire_spinlock(&proc_spinlock)
#define RELEASE_PROC_LOCK() release_spinlock(&proc_spinlock)

// thread list
static struct thread *idle_threads[MAX_BOOT_CPUS];
static void *thread_hash = NULL;
static thread_id next_thread_id = 1;

static sem_id snooze_sem = -1;

// death stacks
// used temporarily as a thread cleans itself up
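// (a dying thread cannot tear down the kernel stack it is currently
// running on, so it hops onto one of these stacks for its final cleanup;
// see thread_exit()/thread_exit2() below)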
struct death_stack {
	region_id rid;
	addr address;
	bool in_use;
};
static struct death_stack *death_stacks;
static unsigned int num_death_stacks;
static unsigned int volatile death_stack_bitmap;
static sem_id death_stack_sem;

// thread queues
static struct thread_queue run_q[THREAD_NUM_PRIORITY_LEVELS] = { { NULL, NULL }, };
static struct thread_queue dead_q;

static int _rand(void);
static void thread_entry(void);
static struct thread *thread_get_thread_struct_locked(thread_id id);
static struct proc *proc_get_proc_struct(proc_id id);
static struct proc *proc_get_proc_struct_locked(proc_id id);
static void thread_kthread_exit(void);
static void deliver_signal(struct thread *t, int signal);

// insert a thread onto the tail of a queue
void thread_enqueue(struct thread *t, struct thread_queue *q)
{
	t->q_next = NULL;
	if(q->head == NULL) {
		q->head = t;
		q->tail = t;
	} else {
		q->tail->q_next = t;
		q->tail = t;
	}
}

struct thread *thread_lookat_queue(struct thread_queue *q)
{
	return q->head;
}

struct thread *thread_dequeue(struct thread_queue *q)
{
	struct thread *t;

	t = q->head;
	if(t != NULL) {
		q->head = t->q_next;
		if(q->tail == t)
			q->tail = NULL;
	}
	return t;
}

struct thread *thread_dequeue_id(struct thread_queue *q, thread_id thr_id)
{
	struct thread *t;
	struct thread *last = NULL;

	t = q->head;
	while(t != NULL) {
		if(t->id == thr_id) {
			if(last == NULL) {
				q->head = t->q_next;
			} else {
				last->q_next = t->q_next;
			}
			if(q->tail == t)
				q->tail = last;
			break;
		}
		last = t;
		t = t->q_next;
	}
	return t;
}

struct thread *thread_lookat_run_q(int priority)
{
	return thread_lookat_queue(&run_q[priority]);
}

void thread_enqueue_run_q(struct thread *t)
{
	// these shouldn't exist
	if(t->priority > THREAD_MAX_PRIORITY)
		t->priority = THREAD_MAX_PRIORITY;
	if(t->priority < 0)
		t->priority = 0;

	thread_enqueue(t, &run_q[t->priority]);
}

struct thread *thread_dequeue_run_q(int priority)
{
	return thread_dequeue(&run_q[priority]);
}

static void insert_thread_into_proc(struct proc *p, struct thread *t)
{
	t->proc_next = p->thread_list;
	p->thread_list = t;
	p->num_threads++;
	if(p->num_threads == 1) {
		// this was the first thread
		p->main_thread = t;
	}
	t->proc = p;
}

static void remove_thread_from_proc(struct proc *p, struct thread *t)
{
	struct thread *temp, *last = NULL;

	for(temp = p->thread_list; temp != NULL; temp = temp->proc_next) {
		if(temp == t) {
			if(last == NULL) {
				p->thread_list = temp->proc_next;
			} else {
				last->proc_next = temp->proc_next;
			}
			p->num_threads--;
			break;
		}
		last = temp;
	}
}

static int thread_struct_compare(void *_t, const void *_key)
{
	struct thread *t = _t;
	const struct thread_key *key = _key;

	if(t->id == key->id) return 0;
	else return 1;
}

// Frees the argument list
// Parameters
//	args	argument list
//	argc	number of arguments

static void free_arg_list(char **args, int argc)
{
	int cnt = argc;

	if(args != NULL) {
		for(cnt = 0; cnt < argc; cnt++){
			kfree(args[cnt]);
		}

		kfree(args);
	}
}

// Copy argument list from userspace to kernel space
// Parameters
//	args	userspace argument list
//	argc	number of arguments
//	kargs	set to the kernel-space copy of the list
// returns < 0 on error, in which case *kargs is set to NULL

static int user_copy_arg_list(char **args, int argc, char ***kargs)
{
	char **largs;
	int err;
	int cnt;
	char *source;
	char buf[SYS_THREAD_ARG_LENGTH_MAX];

	*kargs = NULL;
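
	// reject argument pointers that point into the kernel's address range;
	// everything we copy from here on must come from user space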
	if((addr)args >= KERNEL_BASE && (addr)args <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	largs = kmalloc((argc + 1) * sizeof(char *));
	if(largs == NULL){
		return ERR_NO_MEMORY;
	}

	// scan all parameters and copy to kernel space
	for(cnt = 0; cnt < argc; cnt++) {
		err = user_memcpy(&source, &(args[cnt]), sizeof(char *));
		if(err < 0)
			goto error;

		if((addr)source >= KERNEL_BASE && (addr)source <= KERNEL_TOP){
			err = ERR_VM_BAD_USER_MEMORY;
			goto error;
		}

		err = user_strncpy(buf, source, SYS_THREAD_ARG_LENGTH_MAX - 1);
		if(err < 0)
			goto error;
		buf[SYS_THREAD_ARG_LENGTH_MAX - 1] = 0;

		largs[cnt] = kstrdup(buf);
		if(largs[cnt] == NULL){
			err = ERR_NO_MEMORY;
			goto error;
		}
	}

	largs[argc] = NULL;

	*kargs = largs;
	return NO_ERROR;

error:
	free_arg_list(largs, cnt);
	dprintf("user_copy_arg_list failed %d \n", err);
	return err;
}

static unsigned int thread_struct_hash(void *_t, const void *_key, unsigned int range)
{
	struct thread *t = _t;
	const struct thread_key *key = _key;

	if(t != NULL)
		return (t->id % range);
	else
		return (key->id % range);
}

static struct thread *create_thread_struct(const char *name)
{
	struct thread *t;
	int state;
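
	// recycle a thread structure from the dead queue if one is available,
	// otherwise fall back to allocating a fresh one below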
	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();
	t = thread_dequeue(&dead_q);
	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	if(t == NULL) {
		t = (struct thread *)kmalloc(sizeof(struct thread));
		if(t == NULL)
			goto err;
	}

	strncpy(&t->name[0], name, SYS_MAX_OS_NAME_LEN-1);
	t->name[SYS_MAX_OS_NAME_LEN-1] = 0;

	t->id = atomic_add(&next_thread_id, 1);
	t->proc = NULL;
	t->cpu = NULL;
	t->sem_blocking = -1;
	t->fault_handler = 0;
	t->kernel_stack_region_id = -1;
	t->kernel_stack_base = 0;
	t->user_stack_region_id = -1;
	t->user_stack_base = 0;
	t->proc_next = NULL;
	t->q_next = NULL;
	t->priority = -1;
	t->args = NULL;
	t->pending_signals = SIG_NONE;
	t->in_kernel = true;
	t->user_time = 0;
	t->kernel_time = 0;
	t->last_time = 0;
	{
		char temp[64];

		sprintf(temp, "thread_0x%x_retcode_sem", t->id);
		t->return_code_sem = sem_create(0, temp);
		if(t->return_code_sem < 0)
			goto err1;
	}

	if(arch_thread_init_thread_struct(t) < 0)
		goto err2;

	return t;

err2:
	sem_delete_etc(t->return_code_sem, -1);
err1:
	kfree(t);
err:
	return NULL;
}

static void delete_thread_struct(struct thread *t)
{
	if(t->return_code_sem >= 0)
		sem_delete_etc(t->return_code_sem, -1);
	kfree(t);
}

static int _create_user_thread_kentry(void)
{
	struct thread *t;

	t = thread_get_current_thread();

	// a signal may have been delivered here
	thread_atkernel_exit();

	// jump to the entry point in user space
	arch_thread_enter_uspace((addr)t->entry, t->args, t->user_stack_base + STACK_SIZE);

	// never get here
	return 0;
}

static int _create_kernel_thread_kentry(void)
{
	int (*func)(void *args);
	struct thread *t;

	t = thread_get_current_thread();

	// call the entry function with the appropriate args
	func = (void *)t->entry;

	return func(t->args);
}

static thread_id _create_thread(const char *name, proc_id pid, addr entry, void *args, bool kernel)
{
	struct thread *t;
	struct proc *p;
	int state;
	char stack_name[64];
	bool abort = false;

	t = create_thread_struct(name);
	if(t == NULL)
		return ERR_NO_MEMORY;

	t->priority = THREAD_MEDIUM_PRIORITY;
	t->state = THREAD_STATE_BIRTH;
	t->next_state = THREAD_STATE_SUSPENDED;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	// insert into global list
	hash_insert(thread_hash, t);
	RELEASE_THREAD_LOCK();

	GRAB_PROC_LOCK();
	// look at the proc, make sure it's not being deleted
	p = proc_get_proc_struct_locked(pid);
	if(p != NULL && p->state != PROC_STATE_DEATH) {
		insert_thread_into_proc(p, t);
	} else {
		abort = true;
	}
	RELEASE_PROC_LOCK();
	if(abort) {
		GRAB_THREAD_LOCK();
		hash_remove(thread_hash, t);
		RELEASE_THREAD_LOCK();
	}
	int_restore_interrupts(state);
	if(abort) {
		delete_thread_struct(t);
		return ERR_TASK_PROC_DELETED;
	}

	sprintf(stack_name, "%s_kstack", name);
	t->kernel_stack_region_id = vm_create_anonymous_region(vm_get_kernel_aspace_id(), stack_name,
		(void **)&t->kernel_stack_base, REGION_ADDR_ANY_ADDRESS, KSTACK_SIZE,
		REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
	if(t->kernel_stack_region_id < 0)
		panic("_create_thread: error creating kernel stack!\n");

	t->args = args;
	t->entry = entry;

	if(kernel) {
		// this sets up an initial kthread stack that runs the entry
		arch_thread_initialize_kthread_stack(t, &_create_kernel_thread_kentry, &thread_entry, &thread_kthread_exit);
	} else {
		// create user stack
		// XXX make this better. For now just keep trying to create a stack
		// until we find a spot.
		t->user_stack_base = (USER_STACK_REGION - STACK_SIZE) + USER_STACK_REGION_SIZE;
		while(t->user_stack_base > USER_STACK_REGION) {
			sprintf(stack_name, "%s_stack%d", p->name, t->id);
			t->user_stack_region_id = vm_create_anonymous_region(p->aspace_id, stack_name,
				(void **)&t->user_stack_base,
				REGION_ADDR_ANY_ADDRESS, STACK_SIZE, REGION_WIRING_LAZY, LOCK_RW);
			if(t->user_stack_region_id < 0) {
				t->user_stack_base -= STACK_SIZE;
			} else {
				// we created a region
				break;
			}
		}
		if(t->user_stack_region_id < 0)
			panic("_create_thread: unable to create user stack!\n");

		// copy the user entry over to the args field in the thread struct
		// the function this will call will immediately switch the thread into
		// user space.
		arch_thread_initialize_kthread_stack(t, &_create_user_thread_kentry, &thread_entry, &thread_kthread_exit);
	}

	t->state = THREAD_STATE_SUSPENDED;

	return t->id;
}

thread_id user_thread_create_user_thread(char *uname, proc_id pid, addr entry, void *args)
{
	char name[SYS_MAX_OS_NAME_LEN];
	int rc;

	if((addr)uname >= KERNEL_BASE && (addr)uname <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;
	if(entry >= KERNEL_BASE && entry <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
	if(rc < 0)
		return rc;
	name[SYS_MAX_OS_NAME_LEN-1] = 0;

	return thread_create_user_thread(name, pid, entry, args);
}

thread_id thread_create_user_thread(char *name, proc_id pid, addr entry, void *args)
{
	return _create_thread(name, pid, entry, args, false);
}

thread_id thread_create_kernel_thread(const char *name, int (*func)(void *), void *args)
{
	return _create_thread(name, proc_get_kernel_proc()->id, (addr)func, args, true);
}

static thread_id thread_create_kernel_thread_etc(const char *name, int (*func)(void *), void *args, struct proc *p)
{
	return _create_thread(name, p->id, (addr)func, args, true);
}

int thread_suspend_thread(thread_id id)
{
	int state;
	struct thread *t;
	int retval;
	bool global_resched = false;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_current_thread();
	if(t->id != id) {
		t = thread_get_thread_struct_locked(id);
	}

	if(t != NULL) {
		if(t->proc == kernel_proc) {
			// no way
			retval = ERR_NOT_ALLOWED;
		} else if(t->in_kernel == true) {
			t->pending_signals |= SIG_SUSPEND;
			retval = NO_ERROR;
		} else {
			t->next_state = THREAD_STATE_SUSPENDED;
			global_resched = true;
			retval = NO_ERROR;
		}
	} else {
		retval = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	if(global_resched) {
		smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
	}

	return retval;
}

int thread_resume_thread(thread_id id)
{
	int state;
	struct thread *t;
	int retval;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(t != NULL && t->state == THREAD_STATE_SUSPENDED) {
		t->state = THREAD_STATE_READY;
		t->next_state = THREAD_STATE_READY;

		thread_enqueue_run_q(t);
		retval = NO_ERROR;
	} else {
		retval = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	return retval;
}

int thread_set_priority(thread_id id, int priority)
{
	struct thread *t;
	int retval;

	// make sure the passed in priority is within bounds
	if(priority > THREAD_MAX_PRIORITY)
		priority = THREAD_MAX_PRIORITY;
	if(priority < THREAD_MIN_PRIORITY)
		priority = THREAD_MIN_PRIORITY;

	t = thread_get_current_thread();
	if(t->id == id) {
		// it's ourself, so we know we aren't in a run queue, and we can manipulate
		// our structure directly
		t->priority = priority;
		retval = NO_ERROR;
	} else {
		int state = int_disable_interrupts();
		GRAB_THREAD_LOCK();

		t = thread_get_thread_struct_locked(id);
		if(t) {
			if(t->state == THREAD_STATE_READY && t->priority != priority) {
				// this thread is in a ready queue right now, so it needs to be reinserted
				thread_dequeue_id(&run_q[t->priority], t->id);
				t->priority = priority;
				thread_enqueue_run_q(t);
			} else {
				t->priority = priority;
			}
			retval = NO_ERROR;
		} else {
			retval = ERR_INVALID_HANDLE;
		}

		RELEASE_THREAD_LOCK();
		int_restore_interrupts(state);
	}

	return retval;
}

int thread_get_thread_info(thread_id id, struct thread_info *outinfo)
{
	int state;
	struct thread *t;
	struct thread_info info;
	int err;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(!t) {
		err = ERR_INVALID_HANDLE;
		goto out;
	}

	/* found the thread, copy the data out */
	info.id = id;
	info.owner_proc_id = t->proc->id;
	strncpy(info.name, t->name, SYS_MAX_OS_NAME_LEN-1);
	info.name[SYS_MAX_OS_NAME_LEN-1] = '\0';
	info.state = t->state;
	info.user_stack_base = t->user_stack_base;
	info.user_time = t->user_time;
	info.kernel_time = t->kernel_time;

	err = NO_ERROR;

out:
	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	if(err >= 0)
		memcpy(outinfo, &info, sizeof(info));

	return err;
}

int user_thread_get_thread_info(thread_id id, struct thread_info *uinfo)
{
	struct thread_info info;
	int err, err2;

	if((addr)uinfo >= KERNEL_BASE && (addr)uinfo <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err = thread_get_thread_info(id, &info);
	if(err < 0)
		return err;

	err2 = user_memcpy(uinfo, &info, sizeof(info));
	if(err2 < 0)
		return err2;

	return err;
}

int thread_get_next_thread_info(uint32 *_cookie, proc_id pid, struct thread_info *outinfo)
{
	int state;
	struct thread *t;
	struct proc *p;
	struct thread_info info;
	thread_id tid;
	int err;
	thread_id cookie;

	cookie = (thread_id)*_cookie;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(pid);
	if(!p) {
		err = ERR_INVALID_HANDLE;
		goto out;
	}

	/* find the next thread in the list of threads in the proc structure */
	if(cookie == 0) {
		t = p->thread_list;
	} else {
		for(t = p->thread_list; t; t = t->proc_next) {
			if(t->id == cookie) {
				/* we found what the last search got us, walk one past the last search */
				t = t->proc_next;
				break;
			}
		}
	}

	if(!t) {
		err = ERR_NOT_FOUND;
		goto out;
	}

	/* found the thread, copy the data out */
	info.id = t->id;
	info.owner_proc_id = t->proc->id;
	strncpy(info.name, t->name, SYS_MAX_OS_NAME_LEN-1);
	info.name[SYS_MAX_OS_NAME_LEN-1] = '\0';
	info.state = t->state;
	info.user_stack_base = t->user_stack_base;
	info.user_time = t->user_time;
	info.kernel_time = t->kernel_time;

	err = NO_ERROR;

	*_cookie = (uint32)t->id;

out:
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	if(err >= 0)
		memcpy(outinfo, &info, sizeof(info));

	return err;
}

int user_thread_get_next_thread_info(uint32 *ucookie, proc_id pid, struct thread_info *uinfo)
{
	struct thread_info info;
	uint32 cookie;
	int err, err2;

	if((addr)ucookie >= KERNEL_BASE && (addr)ucookie <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	if((addr)uinfo >= KERNEL_BASE && (addr)uinfo <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err2 = user_memcpy(&cookie, ucookie, sizeof(cookie));
	if(err2 < 0)
		return err2;

	err = thread_get_next_thread_info(&cookie, pid, &info);
	if(err < 0)
		return err;

	err2 = user_memcpy(uinfo, &info, sizeof(info));
	if(err2 < 0)
		return err2;

	err2 = user_memcpy(ucookie, &cookie, sizeof(cookie));
	if(err2 < 0)
		return err2;

	return err;
}

static void _dump_proc_info(struct proc *p)
{
	dprintf("PROC: %p\n", p);
	dprintf("id: 0x%x\n", p->id);
	dprintf("name: '%s'\n", p->name);
	dprintf("next: %p\n", p->next);
	dprintf("num_threads: %d\n", p->num_threads);
	dprintf("state: %d\n", p->state);
	dprintf("pending_signals: 0x%x\n", p->pending_signals);
	dprintf("ioctx: %p\n", p->ioctx);
	dprintf("aspace_id: 0x%x\n", p->aspace_id);
	dprintf("aspace: %p\n", p->aspace);
	dprintf("kaspace: %p\n", p->kaspace);
	dprintf("main_thread: %p\n", p->main_thread);
	dprintf("thread_list: %p\n", p->thread_list);
}

static void dump_proc_info(int argc, char **argv)
{
	struct proc *p;
	int id = -1;
	unsigned long num;
	struct hash_iterator i;

	if(argc < 2) {
		dprintf("proc: not enough arguments\n");
		return;
	}

	// if the argument looks like a hex number, treat it as such
	if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
		num = atoul(argv[1]);
		if(num > vm_get_kernel_aspace()->virtual_map.base) {
			// XXX semi-hack
			_dump_proc_info((struct proc*)num);
			return;
		} else {
			id = num;
		}
	}

	// walk through the process list, trying to match name or id
	hash_open(proc_hash, &i);
	while((p = hash_next(proc_hash, &i)) != NULL) {
		if((p->name && strcmp(argv[1], p->name) == 0) || p->id == id) {
			_dump_proc_info(p);
			break;
		}
	}
	hash_close(proc_hash, &i, false);
}

static const char *state_to_text(int state)
{
	switch(state) {
		case THREAD_STATE_READY:
			return "READY";
		case THREAD_STATE_RUNNING:
			return "RUNNING";
		case THREAD_STATE_WAITING:
			return "WAITING";
		case THREAD_STATE_SUSPENDED:
			return "SUSPEND";
		case THREAD_STATE_FREE_ON_RESCHED:
			return "DEATH";
		case THREAD_STATE_BIRTH:
			return "BIRTH";
		default:
			return "UNKNOWN";
	}
}

static struct thread *last_thread_dumped = NULL;

static void _dump_thread_info(struct thread *t)
{
	dprintf("THREAD: %p\n", t);
	dprintf("id: 0x%x\n", t->id);
	dprintf("name: '%s'\n", t->name);
	dprintf("all_next: %p\nproc_next: %p\nq_next: %p\n",
		t->all_next, t->proc_next, t->q_next);
	dprintf("priority: 0x%x\n", t->priority);
	dprintf("state: %s\n", state_to_text(t->state));
	dprintf("next_state: %s\n", state_to_text(t->next_state));
	dprintf("cpu: %p ", t->cpu);
	if(t->cpu)
		dprintf("(%d)\n", t->cpu->info.cpu_num);
	else
		dprintf("\n");
	dprintf("pending_signals: 0x%x\n", t->pending_signals);
	dprintf("in_kernel: %d\n", t->in_kernel);
	dprintf("sem_blocking: 0x%x\n", t->sem_blocking);
	dprintf("sem_count: 0x%x\n", t->sem_count);
	dprintf("sem_deleted_retcode: 0x%x\n", t->sem_deleted_retcode);
	dprintf("sem_errcode: 0x%x\n", t->sem_errcode);
	dprintf("sem_flags: 0x%x\n", t->sem_flags);
	dprintf("fault_handler: 0x%lx\n", t->fault_handler);
	dprintf("args: %p\n", t->args);
	dprintf("entry: 0x%lx\n", t->entry);
	dprintf("proc: %p\n", t->proc);
	dprintf("return_code_sem: 0x%x\n", t->return_code_sem);
	dprintf("kernel_stack_region_id: 0x%x\n", t->kernel_stack_region_id);
	dprintf("kernel_stack_base: 0x%lx\n", t->kernel_stack_base);
	dprintf("user_stack_region_id: 0x%x\n", t->user_stack_region_id);
	dprintf("user_stack_base: 0x%lx\n", t->user_stack_base);
	dprintf("kernel_time: %Ld\n", t->kernel_time);
	dprintf("user_time: %Ld\n", t->user_time);
	dprintf("architecture dependent section:\n");
	arch_thread_dump_info(&t->arch_info);

	last_thread_dumped = t;
}

static void dump_thread_info(int argc, char **argv)
{
	struct thread *t;
	int id = -1;
	unsigned long num;
	struct hash_iterator i;

	if(argc < 2) {
		dprintf("thread: not enough arguments\n");
		return;
	}

	// if the argument looks like a hex number, treat it as such
	if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
		num = atoul(argv[1]);
		if(num > vm_get_kernel_aspace()->virtual_map.base) {
			// XXX semi-hack
			_dump_thread_info((struct thread *)num);
			return;
		} else {
			id = num;
		}
	}

	// walk through the thread list, trying to match name or id
	hash_open(thread_hash, &i);
	while((t = hash_next(thread_hash, &i)) != NULL) {
		if((t->name && strcmp(argv[1], t->name) == 0) || t->id == id) {
			_dump_thread_info(t);
			break;
		}
	}
	hash_close(thread_hash, &i, false);
}

static void dump_thread_list(int argc, char **argv)
{
	struct thread *t;
	struct hash_iterator i;

	hash_open(thread_hash, &i);
	while((t = hash_next(thread_hash, &i)) != NULL) {
		dprintf("%p", t);
		if(t->name != NULL)
			dprintf("\t%32s", t->name);
		else
			dprintf("\t%32s", "<NULL>");
		dprintf("\t0x%x", t->id);
		dprintf("\t%16s", state_to_text(t->state));
		if(t->cpu)
			dprintf("\t%d", t->cpu->info.cpu_num);
		else
			dprintf("\tNOCPU");
		dprintf("\t0x%lx\n", t->kernel_stack_base);
	}
	hash_close(thread_hash, &i, false);
}

static void dump_next_thread_in_q(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in queue after thread @ %p\n", t);
	if(t->q_next != NULL) {
		_dump_thread_info(t->q_next);
	} else {
		dprintf("NULL\n");
	}
}

static void dump_next_thread_in_all_list(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in global list after thread @ %p\n", t);
	if(t->all_next != NULL) {
		_dump_thread_info(t->all_next);
	} else {
		dprintf("NULL\n");
	}
}

static void dump_next_thread_in_proc(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in proc after thread @ %p\n", t);
	if(t->proc_next != NULL) {
		_dump_thread_info(t->proc_next);
	} else {
		dprintf("NULL\n");
	}
}

static int get_death_stack(void)
{
	int i;
	unsigned int bit;
	int state;

	sem_acquire(death_stack_sem, 1);

	// grab the thread lock, find a free spot and release
	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();
	bit = death_stack_bitmap;
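	// isolate the lowest clear bit of the bitmap: with x = ~bit,
	// x & ~(x - 1) is the classic trick for extracting the lowest set bit of x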
	bit = (~bit)&~((~bit)-1);
	death_stack_bitmap |= bit;
	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	// sanity checks
	if( !bit ) {
		panic("get_death_stack: couldn't find free stack!\n");
	}
	if( bit & (bit-1)) {
		panic("get_death_stack: impossible bitmap result!\n");
	}

	// bit to number
	i = -1;
	while(bit) {
		bit >>= 1;
		i += 1;
	}

	// dprintf("get_death_stack: returning 0x%lx\n", death_stacks[i].address);

	return i;
}

static void put_death_stack_and_reschedule(unsigned int index)
{
	// dprintf("put_death_stack...: passed %d\n", index);

	if(index >= num_death_stacks)
		panic("put_death_stack: passed invalid stack index %d\n", index);

	if(!(death_stack_bitmap & (1 << index)))
		panic("put_death_stack: passed invalid stack index %d\n", index);

	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	death_stack_bitmap &= ~(1 << index);

	sem_release_etc(death_stack_sem, 1, SEM_FLAG_NO_RESCHED);

	thread_resched();
}

int thread_init(kernel_args *ka)
{
	struct thread *t;
	unsigned int i;

	// dprintf("thread_init: entry\n");

	// create the process hash table
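	// (the pointer arithmetic below just computes the byte offset of the
	// structure's chain link field, i.e. offsetof(struct proc, next);
	// kernel_proc is still NULL here but doesn't need to point at a real
	// proc for this to work. The thread hash uses the same trick with t.)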
	proc_hash = hash_init(15, (addr)&kernel_proc->next - (addr)kernel_proc,
		&proc_struct_compare, &proc_struct_hash);

	// create the kernel process
	kernel_proc = create_proc_struct("kernel", true);
	if(kernel_proc == NULL)
		panic("could not create kernel proc!\n");
	kernel_proc->state = PROC_STATE_NORMAL;

	kernel_proc->ioctx = vfs_new_ioctx(NULL);
	if(kernel_proc->ioctx == NULL)
		panic("could not create ioctx for kernel proc!\n");

	// stick it in the process hash
	hash_insert(proc_hash, kernel_proc);

	// create the thread hash table
	thread_hash = hash_init(15, (addr)&t->all_next - (addr)t,
		&thread_struct_compare, &thread_struct_hash);

	// zero out the run queues
	memset(run_q, 0, sizeof(run_q));

	// zero out the dead thread structure q
	memset(&dead_q, 0, sizeof(dead_q));

	// allocate a snooze sem
	snooze_sem = sem_create(0, "snooze sem");
	if(snooze_sem < 0) {
		panic("error creating snooze sem\n");
		return snooze_sem;
	}

	// create an idle thread for each cpu
	for(i = 0; i < ka->num_cpus; i++) {
		char temp[64];
		vm_region *region;

		sprintf(temp, "idle_thread%d", i);
		t = create_thread_struct(temp);
		if(t == NULL) {
			panic("error creating idle thread struct\n");
			return ERR_NO_MEMORY;
		}
		t->proc = proc_get_kernel_proc();
		t->priority = THREAD_IDLE_PRIORITY;
		t->state = THREAD_STATE_RUNNING;
		t->next_state = THREAD_STATE_READY;
		sprintf(temp, "idle_thread%d_kstack", i);
		t->kernel_stack_region_id = vm_find_region_by_name(vm_get_kernel_aspace_id(), temp);
		region = vm_get_region_by_id(t->kernel_stack_region_id);
		if(!region) {
			panic("error finding idle kstack region\n");
		}
		t->kernel_stack_base = region->base;
		vm_put_region(region);
		hash_insert(thread_hash, t);
		insert_thread_into_proc(t->proc, t);
		idle_threads[i] = t;
		if(i == 0)
			arch_thread_set_current_thread(t);
		t->cpu = &cpu[i];
	}

	// create a set of death stacks
	num_death_stacks = smp_get_num_cpus();
	if(num_death_stacks > 8*sizeof(death_stack_bitmap)) {
		/*
		 * clamp values for really beefy machines
		 */
		num_death_stacks = 8*sizeof(death_stack_bitmap);
	}
	death_stack_bitmap = 0;
	death_stacks = (struct death_stack *)kmalloc(num_death_stacks * sizeof(struct death_stack));
	if(death_stacks == NULL) {
		panic("error creating death stacks\n");
		return ERR_NO_MEMORY;
	}
	{
		char temp[64];

		for(i = 0; i < num_death_stacks; i++) {
			sprintf(temp, "death_stack%d", i);
			death_stacks[i].rid = vm_create_anonymous_region(vm_get_kernel_aspace_id(), temp,
				(void **)&death_stacks[i].address,
				REGION_ADDR_ANY_ADDRESS, KSTACK_SIZE, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
			if(death_stacks[i].rid < 0) {
				panic("error creating death stacks\n");
				return death_stacks[i].rid;
			}
			death_stacks[i].in_use = false;
		}
	}
	death_stack_sem = sem_create(num_death_stacks, "death_stack_noavail_sem");

	// set up some debugger commands
	dbg_add_command(dump_thread_list, "threads", "list all threads");
	dbg_add_command(dump_thread_info, "thread", "list info about a particular thread");
	dbg_add_command(dump_next_thread_in_q, "next_q", "dump the next thread in the queue of last thread viewed");
	dbg_add_command(dump_next_thread_in_all_list, "next_all", "dump the next thread in the global list of the last thread viewed");
	dbg_add_command(dump_next_thread_in_proc, "next_proc", "dump the next thread in the process of the last thread viewed");
	dbg_add_command(dump_proc_info, "proc", "list info about a particular process");

	return 0;
}

int thread_init_percpu(int cpu_num)
{
	arch_thread_set_current_thread(idle_threads[cpu_num]);
	return 0;
}

// this starts the scheduler. Must be run under the context of
// the initial idle thread.
void thread_start_threading(void)
{
	int state;

	// XXX may not be the best place for this
	// invalidate all of the other processors' TLB caches
	state = int_disable_interrupts();
	arch_cpu_global_TLB_invalidate();
	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
	int_restore_interrupts(state);

	// start the other processors
	smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	thread_resched();

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);
}

int user_thread_snooze(bigtime_t time)
{
	thread_snooze(time);
	return NO_ERROR;
}
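
// the snooze sem is created with a count of zero and is never released, so
// the acquire below always blocks until the timeout expires; that is what
// turns a semaphore wait into a sleep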
void thread_snooze(bigtime_t time)
{
	sem_acquire_etc(snooze_sem, 1, SEM_FLAG_TIMEOUT, time, NULL);
}

// this function gets run by a new thread before anything else
static void thread_entry(void)
{
	// simulates the thread spinlock release that would occur if the thread had been
	// rescheduled from. The resched didn't happen because the thread is new.
	RELEASE_THREAD_LOCK();
	int_enable_interrupts(); // this essentially simulates a return-from-interrupt
}

// used to pass messages between thread_exit and thread_exit2
struct thread_exit_args {
	struct thread *t;
	region_id old_kernel_stack;
	int int_state;
	unsigned int death_stack;
};

static void thread_exit2(void *_args)
{
	struct thread_exit_args args;
	char *temp;

	// copy the arguments over, since the source is probably on the kernel stack we're about to delete
	memcpy(&args, _args, sizeof(struct thread_exit_args));

	// restore the interrupts
	int_restore_interrupts(args.int_state);

	// dprintf("thread_exit2, running on death stack 0x%lx\n", args.t->kernel_stack_base);

	// delete the old kernel stack region
	// dprintf("thread_exit2: deleting old kernel stack id 0x%x for thread 0x%x\n", args.old_kernel_stack, args.t->id);
	vm_delete_region(vm_get_kernel_aspace_id(), args.old_kernel_stack);

	// dprintf("thread_exit2: removing thread 0x%x from global lists\n", args.t->id);

	// remove this thread from all of the global lists
	int_disable_interrupts();
	GRAB_PROC_LOCK();
	remove_thread_from_proc(kernel_proc, args.t);
	RELEASE_PROC_LOCK();
	GRAB_THREAD_LOCK();
	hash_remove(thread_hash, args.t);
	RELEASE_THREAD_LOCK();

	// dprintf("thread_exit2: done removing thread from lists\n");

	// set the next state to be gone. Will return the thread structure to a ready pool upon reschedule
	args.t->next_state = THREAD_STATE_FREE_ON_RESCHED;

	// return the death stack and reschedule one last time
	put_death_stack_and_reschedule(args.death_stack);

	// never get to here
	panic("thread_exit2: made it where it shouldn't have!\n");
}

void thread_exit(int retcode)
{
	int state;
	struct thread *t = thread_get_current_thread();
	struct proc *p = t->proc;
	bool delete_proc = false;
	unsigned int death_stack;

	dprintf("thread 0x%x exiting w/return code 0x%x\n", t->id, retcode);

	// boost our priority to get this over with
	thread_set_priority(t->id, THREAD_HIGH_PRIORITY);

	// delete the user stack region first
	if(p->aspace_id >= 0 && t->user_stack_region_id >= 0) {
		region_id rid = t->user_stack_region_id;
		t->user_stack_region_id = -1;
		vm_delete_region(p->aspace_id, rid);
	}

	if(p != kernel_proc) {
		// remove this thread from the current process and add it to the kernel
		// put the thread into the kernel proc until it dies
		state = int_disable_interrupts();
		GRAB_PROC_LOCK();
		remove_thread_from_proc(p, t);
		insert_thread_into_proc(kernel_proc, t);
		if(p->main_thread == t) {
			// this was the main thread in this process
			delete_proc = true;
			hash_remove(proc_hash, p);
			p->state = PROC_STATE_DEATH;
		}
		RELEASE_PROC_LOCK();
		// swap address spaces, to make sure we're running on the kernel's pgdir
		vm_aspace_swap(kernel_proc->kaspace);
		int_restore_interrupts(state);

		// dprintf("thread_exit: thread 0x%x now a kernel thread!\n", t->id);
	}

	// delete the process
	if(delete_proc) {
		if(p->num_threads > 0) {
			// there are other threads still in this process,
			// cycle through and signal kill on each of the threads
			// XXX this can be optimized. There's got to be a better solution.
			struct thread *temp_thread;

			state = int_disable_interrupts();
			GRAB_PROC_LOCK();
			// we can safely walk the list because of the lock. no new threads can be created
			// because of the PROC_STATE_DEATH flag on the process
			temp_thread = p->thread_list;
			while(temp_thread) {
				struct thread *next = temp_thread->proc_next;
				thread_kill_thread_nowait(temp_thread->id);
				temp_thread = next;
			}
			RELEASE_PROC_LOCK();
			int_restore_interrupts(state);

			// Now wait for all of the threads to die
			// XXX block on a semaphore
			while((volatile int)p->num_threads > 0) {
				thread_snooze(10000); // 10 ms
			}
		}
		vm_put_aspace(p->aspace);
		vm_delete_aspace(p->aspace_id);
		port_delete_owned_ports(p->id);
		sem_delete_owned_sems(p->id);
		vfs_free_ioctx(p->ioctx);
		kfree(p);
	}

	// delete the sem that others will use to wait on us and get the retcode
	{
		sem_id s = t->return_code_sem;

		t->return_code_sem = -1;
		sem_delete_etc(s, retcode);
	}

	death_stack = get_death_stack();
	{
		struct thread_exit_args args;

		args.t = t;
		args.old_kernel_stack = t->kernel_stack_region_id;
		args.death_stack = death_stack;

		// disable the interrupts. Must remain disabled until the kernel stack pointer can be officially switched
		args.int_state = int_disable_interrupts();

		// set the new kernel stack officially to the death stack; it won't really be switched until
		// the next function is called. This bookkeeping must be done now before a context switch
		// happens, or the processor will interrupt to the old stack
		t->kernel_stack_region_id = death_stacks[death_stack].rid;
		t->kernel_stack_base = death_stacks[death_stack].address;

		// we will continue in thread_exit2(), on the new stack
		arch_thread_switch_kstack_and_call(t, t->kernel_stack_base + KSTACK_SIZE, thread_exit2, &args);
	}

	panic("never can get here\n");
}

static int _thread_kill_thread(thread_id id, bool wait_on)
{
	int state;
	struct thread *t;
	int rc;

	// dprintf("_thread_kill_thread: id %d, wait_on %d\n", id, wait_on);

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(t != NULL) {
		if(t->proc == kernel_proc) {
			// can't touch this
			rc = ERR_NOT_ALLOWED;
		} else {
			deliver_signal(t, SIG_KILL);
			rc = NO_ERROR;
			if(t->id == thread_get_current_thread()->id)
				wait_on = false; // can't wait on ourself
		}
	} else {
		rc = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);
	if(rc < 0)
		return rc;

	if(wait_on)
		thread_wait_on_thread(id, NULL);

	return rc;
}

int thread_kill_thread(thread_id id)
{
	return _thread_kill_thread(id, true);
}

int thread_kill_thread_nowait(thread_id id)
{
	return _thread_kill_thread(id, false);
}

static void thread_kthread_exit(void)
{
	thread_exit(0);
}

int user_thread_wait_on_thread(thread_id id, int *uretcode)
{
	int retcode;
	int rc, rc2;

	if((addr)uretcode >= KERNEL_BASE && (addr)uretcode <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	rc = thread_wait_on_thread(id, &retcode);

	rc2 = user_memcpy(uretcode, &retcode, sizeof(retcode));
	if(rc2 < 0)
		return rc2;

	return rc;
}

int thread_wait_on_thread(thread_id id, int *retcode)
{
	sem_id sem;
	int state;
	struct thread *t;
	int rc;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(t != NULL) {
		sem = t->return_code_sem;
	} else {
		sem = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	rc = sem_acquire_etc(sem, 1, 0, 0, retcode);

	/* This thread died the way it should, don't ripple a non-error up */
	if(rc == ERR_SEM_DELETED)
		rc = NO_ERROR;

	return rc;
}

int user_proc_wait_on_proc(proc_id id, int *uretcode)
{
	int retcode;
	int rc, rc2;

	if((addr)uretcode >= KERNEL_BASE && (addr)uretcode <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	rc = proc_wait_on_proc(id, &retcode);
	if(rc < 0)
		return rc;

	rc2 = user_memcpy(uretcode, &retcode, sizeof(retcode));
	if(rc2 < 0)
		return rc2;

	return rc;
}

int proc_wait_on_proc(proc_id id, int *retcode)
{
	struct proc *p;
	thread_id tid;
	int state;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();
	p = proc_get_proc_struct_locked(id);
	if(p && p->main_thread) {
		tid = p->main_thread->id;
	} else {
		tid = ERR_INVALID_HANDLE;
	}
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	if(tid < 0)
		return tid;

	return thread_wait_on_thread(tid, retcode);
}

struct thread *thread_get_thread_struct(thread_id id)
{
	struct thread *t;
	int state;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	return t;
}

static struct thread *thread_get_thread_struct_locked(thread_id id)
{
	struct thread_key key;

	key.id = id;

	return hash_lookup(thread_hash, &key);
}

static struct proc *proc_get_proc_struct(proc_id id)
{
	struct proc *p;
	int state;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(id);

	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	return p;
}

static struct proc *proc_get_proc_struct_locked(proc_id id)
{
	struct proc_key key;

	key.id = id;

	return hash_lookup(proc_hash, &key);
}

static void thread_context_switch(struct thread *t_from, struct thread *t_to)
{
	bigtime_t now;

	// track kernel time
	now = system_time();
	t_from->kernel_time += now - t_from->last_time;
	t_to->last_time = now;

	t_to->cpu = t_from->cpu;
	arch_thread_set_current_thread(t_to);
	t_from->cpu = NULL;
	arch_thread_context_switch(t_from, t_to);
}
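
// simple linear congruential generator (the multiplier/increment pair is the
// one from the classic ANSI C rand() example); it only adds a little
// randomness to run queue selection in thread_resched(), so quality is not
// a concern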
static int _rand(void)
{
	static int next = 0;

	if(next == 0)
		next = system_time();

	next = next * 1103515245 + 12345;
	return((next >> 16) & 0x7FFF);
}

static int reschedule_event(void *unused)
{
	// this function is called as a result of the timer event set by the scheduler
	// returning this causes a reschedule on the timer event
	thread_get_current_thread()->cpu->info.preempted = 1;
	return INT_RESCHEDULE;
}

// NOTE: expects thread_spinlock to be held
void thread_resched(void)
{
	struct thread *next_thread = NULL;
	int last_thread_pri = -1;
	struct thread *old_thread = thread_get_current_thread();
	int i;
	bigtime_t quantum;
	struct timer_event *quantum_timer;

	// dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), thread_get_current_thread());

	switch(old_thread->next_state) {
		case THREAD_STATE_RUNNING:
		case THREAD_STATE_READY:
			// dprintf("enqueueing thread 0x%x into run q. pri = %d\n", old_thread, old_thread->priority);
			thread_enqueue_run_q(old_thread);
			break;
		case THREAD_STATE_SUSPENDED:
			dprintf("suspending thread 0x%x\n", old_thread->id);
			break;
		case THREAD_STATE_FREE_ON_RESCHED:
			thread_enqueue(old_thread, &dead_q);
			break;
		default:
			// dprintf("not enqueueing thread 0x%x into run q. next_state = %d\n", old_thread, old_thread->next_state);
			break;
	}
	old_thread->state = old_thread->next_state;

	// search the real-time queue
	for(i = THREAD_MAX_RT_PRIORITY; i >= THREAD_MIN_RT_PRIORITY; i--) {
		next_thread = thread_dequeue_run_q(i);
		if(next_thread)
			goto found_thread;
	}

	// search the regular queue
	for(i = THREAD_MAX_PRIORITY; i > THREAD_IDLE_PRIORITY; i--) {
		next_thread = thread_lookat_run_q(i);
		if(next_thread != NULL) {
			// skip it sometimes
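			// (_rand() is uniform over 0..0x7fff, so the highest non-empty
			// priority level is taken with probability ~5/8; otherwise we
			// remember it as a fallback and give a lower level a chance)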
			if(_rand() > 0x3000) {
				next_thread = thread_dequeue_run_q(i);
				goto found_thread;
			}
			last_thread_pri = i;
			next_thread = NULL;
		}
	}
	if(next_thread == NULL) {
		if(last_thread_pri != -1) {
			next_thread = thread_dequeue_run_q(last_thread_pri);
			if(next_thread == NULL)
				panic("next_thread == NULL! last_thread_pri = %d\n", last_thread_pri);
		} else {
			next_thread = thread_dequeue_run_q(THREAD_IDLE_PRIORITY);
			if(next_thread == NULL)
				panic("next_thread == NULL! no idle priorities!\n");
		}
	}

found_thread:
	next_thread->state = THREAD_STATE_RUNNING;
	next_thread->next_state = THREAD_STATE_READY;

	// XXX should only reset the quantum timer if we are switching to a new thread,
	// or we got here as a result of a quantum expire.

	// XXX calculate quantum
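	// (fixed for now: 10000 microseconds, i.e. a 10 ms timeslice)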
	quantum = 10000;

	// get the quantum timer for this cpu
	quantum_timer = &old_thread->cpu->info.quantum_timer;
	if(!old_thread->cpu->info.preempted) {
		_local_timer_cancel_event(old_thread->cpu->info.cpu_num, quantum_timer);
	}
	old_thread->cpu->info.preempted = 0;
	timer_setup_timer(&reschedule_event, NULL, quantum_timer);
	timer_set_event(quantum, TIMER_MODE_ONESHOT, quantum_timer);

	if(next_thread != old_thread) {
		// dprintf("thread_resched: cpu %d switching from thread %d to %d\n",
		//	smp_get_current_cpu(), old_thread->id, next_thread->id);
		thread_context_switch(old_thread, next_thread);
	}
}

static int proc_struct_compare(void *_p, const void *_key)
{
	struct proc *p = _p;
	const struct proc_key *key = _key;

	if(p->id == key->id) return 0;
	else return 1;
}

static unsigned int proc_struct_hash(void *_p, const void *_key, unsigned int range)
{
	struct proc *p = _p;
	const struct proc_key *key = _key;

	if(p != NULL)
		return (p->id % range);
	else
		return (key->id % range);
}

struct proc *proc_get_kernel_proc(void)
{
	return kernel_proc;
}

proc_id proc_get_kernel_proc_id(void)
{
	if(!kernel_proc)
		return 0;
	else
		return kernel_proc->id;
}

proc_id proc_get_current_proc_id(void)
{
	return thread_get_current_thread()->proc->id;
}

static struct proc *create_proc_struct(const char *name, bool kernel)
{
	struct proc *p;

	p = (struct proc *)kmalloc(sizeof(struct proc));
	if(p == NULL)
		goto error;
	p->id = atomic_add(&next_proc_id, 1);
	strncpy(&p->name[0], name, SYS_MAX_OS_NAME_LEN-1);
	p->name[SYS_MAX_OS_NAME_LEN-1] = 0;
	p->num_threads = 0;
	p->ioctx = NULL;
	p->aspace_id = -1;
	p->aspace = NULL;
	p->kaspace = vm_get_kernel_aspace();
	vm_put_aspace(p->kaspace);
	p->thread_list = NULL;
	p->main_thread = NULL;
	p->state = PROC_STATE_BIRTH;
	p->pending_signals = SIG_NONE;

	if(arch_proc_init_proc_struct(p, kernel) < 0)
		goto error1;

	return p;

error1:
	kfree(p);
error:
	return NULL;
}

static void delete_proc_struct(struct proc *p)
{
	kfree(p);
}

int proc_get_proc_info(proc_id id, struct proc_info *outinfo)
{
	int state;
	struct proc *p;
	struct proc_info info;
	int err;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(id);
	if(!p) {
		err = ERR_INVALID_HANDLE;
		goto out;
	}

	/* found the proc, copy the data out */
	info.id = id;
	strncpy(info.name, p->name, SYS_MAX_OS_NAME_LEN-1);
	info.name[SYS_MAX_OS_NAME_LEN-1] = '\0';
	info.state = p->state;
	info.num_threads = p->num_threads;

	err = NO_ERROR;

out:
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	if(err >= 0)
		memcpy(outinfo, &info, sizeof(info));

	return err;
}

int user_proc_get_proc_info(proc_id id, struct proc_info *uinfo)
{
	struct proc_info info;
	int err, err2;

	if((addr)uinfo >= KERNEL_BASE && (addr)uinfo <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err = proc_get_proc_info(id, &info);
	if(err < 0)
		return err;

	err2 = user_memcpy(uinfo, &info, sizeof(info));
	if(err2 < 0)
		return err2;

	return err;
}

int proc_get_next_proc_info(uint32 *cookie, struct proc_info *outinfo)
{
	struct proc *p;
	struct proc_info info;
	int err;
	int state;
	struct hash_iterator i;
	proc_id id = (proc_id)*cookie;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();

	hash_open(proc_hash, &i);
	while((p = hash_next(proc_hash, &i)) != NULL) {
		if(id == 0)
			break; // initial search, return the first proc
		if(p->id == id) {
			// we found the last proc that was looked at, increment to the next one
			p = hash_next(proc_hash, &i);
			break;
		}
	}
	if(p == NULL) {
		err = ERR_NO_MORE_HANDLES;
		goto out;
	}

	// we have the proc structure, copy the data out of it
	info.id = p->id;
	strncpy(info.name, p->name, SYS_MAX_OS_NAME_LEN-1);
	info.name[SYS_MAX_OS_NAME_LEN-1] = '\0';
	info.state = p->state;
	info.num_threads = p->num_threads;

	err = 0;

	*cookie = (uint32)p->id;

out:
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	if(err >= 0)
		memcpy(outinfo, &info, sizeof(info));

	return err;
}

int user_proc_get_next_proc_info(uint32 *ucookie, struct proc_info *uinfo)
{
	struct proc_info info;
	uint32 cookie;
	int err, err2;

	if((addr)ucookie >= KERNEL_BASE && (addr)ucookie <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	if((addr)uinfo >= KERNEL_BASE && (addr)uinfo <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err2 = user_memcpy(&cookie, ucookie, sizeof(cookie));
	if(err2 < 0)
		return err2;

	err = proc_get_next_proc_info(&cookie, &info);
	if(err < 0)
		return err;

	err2 = user_memcpy(uinfo, &info, sizeof(info));
	if(err2 < 0)
		return err2;

	err2 = user_memcpy(ucookie, &cookie, sizeof(cookie));
	if(err2 < 0)
		return err2;

	return err;
}

static int get_arguments_data_size(char **args, int argc)
{
	int cnt;
	int tot_size = 0;

	for(cnt = 0; cnt < argc; cnt++)
		tot_size += strlen(args[cnt]) + 1;
	tot_size += (argc + 1) * sizeof(char *);

	return tot_size + sizeof(struct uspace_prog_args_t);
}

static int proc_create_proc2(void *args)
{
	int err;
	struct thread *t;
	struct proc *p;
	struct proc_arg *pargs = args;
	char *path;
	addr entry;
	char ustack_name[128];
	int tot_top_size;
	char **uargs;
	char *udest;
	struct uspace_prog_args_t *uspa;
	unsigned int cnt;

	t = thread_get_current_thread();
	p = t->proc;

	dprintf("proc_create_proc2: entry thread %d\n", t->id);

	// create an initial primary stack region
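	// the region is STACK_SIZE plus page-aligned room above the stack top
	// for the program arguments, laid out low-to-high as:
	//   [struct uspace_prog_args_t][argv pointer array][argument strings]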
	tot_top_size = STACK_SIZE + PAGE_ALIGN(get_arguments_data_size(pargs->args, pargs->argc));
	t->user_stack_base = ((USER_STACK_REGION - tot_top_size) + USER_STACK_REGION_SIZE);
	sprintf(ustack_name, "%s_primary_stack", p->name);
	t->user_stack_region_id = vm_create_anonymous_region(p->aspace_id, ustack_name, (void **)&t->user_stack_base,
		REGION_ADDR_EXACT_ADDRESS, tot_top_size, REGION_WIRING_LAZY, LOCK_RW);
	if(t->user_stack_region_id < 0) {
		panic("proc_create_proc2: could not create default user stack region\n");
		return t->user_stack_region_id;
	}

	uspa = (struct uspace_prog_args_t *)(t->user_stack_base + STACK_SIZE);
	uargs = (char **)(uspa + 1);
	udest = (char *)(uargs + pargs->argc + 1);
	// dprintf("addr: stack base=0x%x uargs = 0x%x udest=0x%x tot_top_size=%d\n\n", t->user_stack_base, uargs, udest, tot_top_size);

	for(cnt = 0; cnt < pargs->argc; cnt++){
		uargs[cnt] = udest;
		user_strcpy(udest, pargs->args[cnt]);
		udest += strlen(pargs->args[cnt]) + 1;
	}
	uargs[cnt] = NULL;

	user_memcpy(uspa->prog_name, p->name, sizeof(uspa->prog_name));
	user_memcpy(uspa->prog_path, pargs->path, sizeof(uspa->prog_path));
	uspa->argc = cnt;
	uspa->argv = uargs;
	uspa->envc = 0;
	uspa->envp = 0;

	if(pargs->args != NULL)
		free_arg_list(pargs->args, pargs->argc);

	path = pargs->path;
	dprintf("proc_create_proc2: loading elf binary '%s'\n", path);

	err = elf_load_uspace("/boot/libexec/rld.so", p, 0, &entry);
	if(err < 0){
		// XXX clean up proc
		return err;
	}

	// free the args
	kfree(pargs->path);
	kfree(pargs);

	dprintf("proc_create_proc2: loaded elf. entry = 0x%lx\n", entry);

	p->state = PROC_STATE_NORMAL;

	// jump to the entry point in user space
	arch_thread_enter_uspace(entry, uspa, t->user_stack_base + STACK_SIZE);

	// never gets here
	return 0;
}

proc_id proc_create_proc(const char *path, const char *name, char **args, int argc, int priority)
{
	struct proc *p;
	thread_id tid;
	proc_id pid;
	int err;
	unsigned int state;
	int sem_retcode;
	struct proc_arg *pargs;

	dprintf("proc_create_proc: entry '%s', name '%s' args = %p argc = %d\n", path, name, args, argc);

	p = create_proc_struct(name, false);
	if(p == NULL)
		return ERR_NO_MEMORY;

	pid = p->id;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();
	hash_insert(proc_hash, p);
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	// copy the args over
	pargs = kmalloc(sizeof(struct proc_arg));
	if(pargs == NULL){
		err = ERR_NO_MEMORY;
		goto err1;
	}
	pargs->path = kstrdup(path);
	if(pargs->path == NULL){
		err = ERR_NO_MEMORY;
		goto err2;
	}
	pargs->argc = argc;
	pargs->args = args;

	// create a new ioctx for this process
	p->ioctx = vfs_new_ioctx(thread_get_current_thread()->proc->ioctx);
	if(!p->ioctx) {
		err = ERR_NO_MEMORY;
		goto err3;
	}

	// create an address space for this process
	p->aspace_id = vm_create_aspace(p->name, USER_BASE, USER_SIZE, false);
	if(p->aspace_id < 0) {
		err = p->aspace_id;
		goto err4;
	}
	p->aspace = vm_get_aspace_by_id(p->aspace_id);

	// create a kernel thread, but under the context of the new process
	tid = thread_create_kernel_thread_etc(name, proc_create_proc2, pargs, p);
	if(tid < 0) {
		err = tid;
		goto err5;
	}

	thread_resume_thread(tid);

	return pid;

err5:
	vm_put_aspace(p->aspace);
	vm_delete_aspace(p->aspace_id);
err4:
	vfs_free_ioctx(p->ioctx);
err3:
	kfree(pargs->path);
err2:
	kfree(pargs);
err1:
	// remove the proc structure from the proc hash table and delete the proc structure
	state = int_disable_interrupts();
	GRAB_PROC_LOCK();
	hash_remove(proc_hash, p);
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);
	delete_proc_struct(p);
err:
	return err;
}

proc_id user_proc_create_proc(const char *upath, const char *uname, char **args, int argc, int priority)
{
	char path[SYS_MAX_PATH_LEN];
	char name[SYS_MAX_OS_NAME_LEN];
	char **kargs;
	int rc;

	dprintf("user_proc_create_proc: argc = %d\n", argc);

	if((addr)upath >= KERNEL_BASE && (addr)upath <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;
	if((addr)uname >= KERNEL_BASE && (addr)uname <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	rc = user_copy_arg_list(args, argc, &kargs);
	if(rc < 0)
		goto error;

	rc = user_strncpy(path, upath, SYS_MAX_PATH_LEN-1);
	if(rc < 0)
		goto error;
	path[SYS_MAX_PATH_LEN-1] = 0;

	rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
	if(rc < 0)
		goto error;
	name[SYS_MAX_OS_NAME_LEN-1] = 0;

	return proc_create_proc(path, name, kargs, argc, priority);

error:
	free_arg_list(kargs, argc);
	return rc;
}

int proc_kill_proc(proc_id id)
{
	int state;
	struct proc *p;
	struct thread *t;
	thread_id tid = -1;
	int retval = 0;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(id);
	if(p != NULL) {
		tid = p->main_thread->id;
	} else {
		retval = ERR_INVALID_HANDLE;
	}

	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);
	if(retval < 0)
		return retval;

	// just kill the main thread in the process. The cleanup code there will
	// take care of the process
	return thread_kill_thread(tid);
}

// sets the pending signal flag on a thread and possibly does some work to wake it up, etc.
// expects the thread lock to be held
static void deliver_signal(struct thread *t, int signal)
{
	// dprintf("deliver_signal: thread %p (%d), signal %d\n", t, t->id, signal);
	switch(signal) {
		case SIG_KILL:
			t->pending_signals |= SIG_KILL;
			switch(t->state) {
				case THREAD_STATE_SUSPENDED:
					t->state = THREAD_STATE_READY;
					t->next_state = THREAD_STATE_READY;

					thread_enqueue_run_q(t);
					break;
				case THREAD_STATE_WAITING:
					sem_interrupt_thread(t);
					break;
				default:
					break;
			}
			break;
		default:
			t->pending_signals |= signal;
	}
}

// expects the thread lock to be held
static void _check_for_thread_sigs(struct thread *t, int state)
{
	if(t->pending_signals == SIG_NONE)
		return;

	if(t->pending_signals & SIG_KILL) {
		t->pending_signals &= ~SIG_KILL;

		RELEASE_THREAD_LOCK();
		int_restore_interrupts(state);
		thread_exit(0);
		// never gets to here
	}
	if(t->pending_signals & SIG_SUSPEND) {
		t->pending_signals &= ~SIG_SUSPEND;
		t->next_state = THREAD_STATE_SUSPENDED;
		// XXX will probably want to delay this
		thread_resched();
	}
}

// called in the int handler code when a thread enters the kernel for any reason
void thread_atkernel_entry(void)
{
	int state;
	struct thread *t;
	bigtime_t now;

	// dprintf("thread_atkernel_entry: entry thread 0x%x\n", t->id);

	t = thread_get_current_thread();

	state = int_disable_interrupts();

	// track user time
	now = system_time();
	t->user_time += now - t->last_time;
	t->last_time = now;

	GRAB_THREAD_LOCK();

	t->in_kernel = true;

	_check_for_thread_sigs(t, state);

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);
}

// called when a thread exits kernel space to user space
void thread_atkernel_exit(void)
{
	int state;
	struct thread *t;
	bigtime_t now;

	// dprintf("thread_atkernel_exit: entry\n");

	t = thread_get_current_thread();

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	_check_for_thread_sigs(t, state);

	t->in_kernel = false;

	RELEASE_THREAD_LOCK();

	// track kernel time
	now = system_time();
	t->kernel_time += now - t->last_time;
	t->last_time = now;

	int_restore_interrupts(state);
}

int user_getrlimit(int resource, struct rlimit * urlp)
{
	int ret;
	struct rlimit rl;

	if (urlp == NULL) {
		return ERR_INVALID_ARGS;
	}
	if((addr)urlp >= KERNEL_BASE && (addr)urlp <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	ret = getrlimit(resource, &rl);

	if (ret == 0) {
		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
		if (ret < 0) {
			return ret;
		}
		return 0;
	}

	return ret;
}

int getrlimit(int resource, struct rlimit * rlp)
{
	if (!rlp) {
		return -1;
	}

	switch(resource) {
		case RLIMIT_NOFILE:
			return vfs_getrlimit(resource, rlp);

		default:
			return -1;
	}

	return 0;
}

int user_setrlimit(int resource, const struct rlimit * urlp)
{
	int err;
	struct rlimit rl;

	if (urlp == NULL) {
		return ERR_INVALID_ARGS;
	}
	if((addr)urlp >= KERNEL_BASE && (addr)urlp <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err = user_memcpy(&rl, urlp, sizeof(struct rlimit));
	if (err < 0) {
		return err;
	}

	return setrlimit(resource, &rl);
}

int setrlimit(int resource, const struct rlimit * rlp)
{
	if (!rlp) {
		return -1;
	}

	switch(resource) {
		case RLIMIT_NOFILE:
			return vfs_setrlimit(resource, rlp);

		default:
			return -1;
	}

	return 0;
}