checkin of some disabled code that checks for interrupts being disabled when we take...
[newos.git] / kernel / thread.c
blob: 96641b436ecd05314a993ae36cb70298d4b3b387
/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/debug.h>
#include <kernel/console.h>
#include <kernel/thread.h>
#include <kernel/arch/thread.h>
#include <kernel/khash.h>
#include <kernel/int.h>
#include <kernel/smp.h>
#include <kernel/timer.h>
#include <kernel/cpu.h>
#include <kernel/arch/cpu.h>
#include <kernel/arch/int.h>
#include <kernel/arch/vm.h>
#include <kernel/sem.h>
#include <kernel/port.h>
#include <kernel/vfs.h>
#include <kernel/elf.h>
#include <kernel/heap.h>
#include <newos/user_runtime.h>
#include <newos/errors.h>
#include <boot/stage2.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
struct proc_key {
	proc_id id;
};

struct thread_key {
	thread_id id;
};

struct proc_arg {
	char *path;
	char **args;
	unsigned int argc;
};

static struct proc *create_proc_struct(const char *name, bool kernel);
static int proc_struct_compare(void *_p, const void *_key);
static unsigned int proc_struct_hash(void *_p, const void *_key, unsigned int range);
// global
spinlock_t thread_spinlock = 0;

// proc list
static void *proc_hash = NULL;
static struct proc *kernel_proc = NULL;
static proc_id next_proc_id = 0;
static spinlock_t proc_spinlock = 0;
// NOTE: the PROC lock can be held over a THREAD lock acquisition,
// but not the other way around (to avoid deadlock)
#define GRAB_PROC_LOCK() acquire_spinlock(&proc_spinlock)
#define RELEASE_PROC_LOCK() release_spinlock(&proc_spinlock)
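
// Illustrative only (not part of the original file): a minimal sketch of the
// documented lock ordering, assuming the GRAB_/RELEASE_THREAD_LOCK macros
// used throughout this file and the usual int_disable_interrupts()/
// int_restore_interrupts() bracketing. The PROC lock may be held while
// taking the THREAD lock, never the reverse:
//
//	state = int_disable_interrupts();
//	GRAB_PROC_LOCK();
//	GRAB_THREAD_LOCK();	// ok: PROC -> THREAD nesting
//	// ...
//	RELEASE_THREAD_LOCK();
//	RELEASE_PROC_LOCK();
//	int_restore_interrupts(state);
//
// Taking the PROC lock while already holding the THREAD lock could deadlock
// against a CPU doing the nesting above.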
// thread list
static struct thread *idle_threads[MAX_BOOT_CPUS];
static void *thread_hash = NULL;
static thread_id next_thread_id = 0;

static sem_id snooze_sem = -1;

// death stacks
// used temporarily as a thread cleans itself up
struct death_stack {
	region_id rid;
	addr address;
	bool in_use;
};
static struct death_stack *death_stacks;
static unsigned int num_death_stacks;
static unsigned int volatile death_stack_bitmap;
static sem_id death_stack_sem;
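
// Illustrative only: the intended life cycle of a death stack, pieced
// together from get_death_stack()/put_death_stack_and_reschedule() below.
// A dying thread cannot free the kernel stack it is running on, so it does
// roughly:
//
//	int index = get_death_stack();	// may block on death_stack_sem
//	// ... switch onto death_stacks[index] via
//	// arch_thread_switch_kstack_and_call(), then free the old stack ...
//	put_death_stack_and_reschedule(index);	// never returns
//
// death_stack_bitmap tracks which of the num_death_stacks entries are in use.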

// thread queues
static struct thread_queue run_q[THREAD_NUM_PRIORITY_LEVELS] = { { NULL, NULL }, };
static struct thread_queue dead_q;

static int _rand(void);
static void thread_entry(void);
static struct thread *thread_get_thread_struct_locked(thread_id id);
static struct proc *proc_get_proc_struct(proc_id id);
static struct proc *proc_get_proc_struct_locked(proc_id id);
static void thread_kthread_exit(void);
static void deliver_signal(struct thread *t, int signal);
// insert a thread onto the tail of a queue
void thread_enqueue(struct thread *t, struct thread_queue *q)
{
	t->q_next = NULL;
	if(q->head == NULL) {
		q->head = t;
		q->tail = t;
	} else {
		q->tail->q_next = t;
		q->tail = t;
	}
}

struct thread *thread_lookat_queue(struct thread_queue *q)
{
	return q->head;
}

struct thread *thread_dequeue(struct thread_queue *q)
{
	struct thread *t;

	t = q->head;
	if(t != NULL) {
		q->head = t->q_next;
		if(q->tail == t)
			q->tail = NULL;
	}
	return t;
}

struct thread *thread_dequeue_id(struct thread_queue *q, thread_id thr_id)
{
	struct thread *t;
	struct thread *last = NULL;

	t = q->head;
	while(t != NULL) {
		if(t->id == thr_id) {
			if(last == NULL) {
				q->head = t->q_next;
			} else {
				last->q_next = t->q_next;
			}
			if(q->tail == t)
				q->tail = last;
			break;
		}
		last = t;
		t = t->q_next;
	}
	return t;
}

struct thread *thread_lookat_run_q(int priority)
{
	return thread_lookat_queue(&run_q[priority]);
}
void thread_enqueue_run_q(struct thread *t)
{
	// clamp out-of-range priorities; these shouldn't exist
	if(t->priority > THREAD_MAX_PRIORITY)
		t->priority = THREAD_MAX_PRIORITY;
	if(t->priority < 0)
		t->priority = 0;

	thread_enqueue(t, &run_q[t->priority]);
}

struct thread *thread_dequeue_run_q(int priority)
{
	return thread_dequeue(&run_q[priority]);
}
static void insert_thread_into_proc(struct proc *p, struct thread *t)
{
	t->proc_next = p->thread_list;
	p->thread_list = t;
	p->num_threads++;
	if(p->num_threads == 1) {
		// this was the first thread
		p->main_thread = t;
	}
	t->proc = p;
}

static void remove_thread_from_proc(struct proc *p, struct thread *t)
{
	struct thread *temp, *last = NULL;

	for(temp = p->thread_list; temp != NULL; temp = temp->proc_next) {
		if(temp == t) {
			if(last == NULL) {
				p->thread_list = temp->proc_next;
			} else {
				last->proc_next = temp->proc_next;
			}
			p->num_threads--;
			break;
		}
		last = temp;
	}
}

static int thread_struct_compare(void *_t, const void *_key)
{
	struct thread *t = _t;
	const struct thread_key *key = _key;

	if(t->id == key->id) return 0;
	else return 1;
}
// Frees the argument list
// Parameters
//	args	argument list
//	argc	number of arguments
static void free_arg_list(char **args, int argc)
{
	int cnt = argc;

	if(args != NULL) {
		for(cnt = 0; cnt < argc; cnt++){
			kfree(args[cnt]);
		}
		kfree(args);
	}
}
// Copy the argument list from user space to kernel space
// Parameters
//	args	userspace parameters
//	argc	number of parameters
//	kargs	receives the kernel-space copy of the parameters
//	returns < 0 on error, with *kargs set to NULL
static int user_copy_arg_list(char **args, int argc, char ***kargs)
{
	char **largs;
	int err;
	int cnt;
	char *source;
	char buf[SYS_THREAD_ARG_LENGTH_MAX];

	*kargs = NULL;

	if((addr)args >= KERNEL_BASE && (addr)args <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	largs = kmalloc((argc + 1) * sizeof(char *));
	if(largs == NULL){
		return ERR_NO_MEMORY;
	}

	// scan all parameters and copy them to kernel space
	for(cnt = 0; cnt < argc; cnt++) {
		err = user_memcpy(&source, &(args[cnt]), sizeof(char *));
		if(err < 0)
			goto error;

		if((addr)source >= KERNEL_BASE && (addr)source <= KERNEL_TOP){
			err = ERR_VM_BAD_USER_MEMORY;
			goto error;
		}

		err = user_strncpy(buf, source, SYS_THREAD_ARG_LENGTH_MAX - 1);
		if(err < 0)
			goto error;
		buf[SYS_THREAD_ARG_LENGTH_MAX - 1] = 0;

		largs[cnt] = kstrdup(buf);
		if(largs[cnt] == NULL){
			err = ERR_NO_MEMORY;
			goto error;
		}
	}

	largs[argc] = NULL;

	*kargs = largs;
	return NO_ERROR;

error:
	free_arg_list(largs, cnt);
	dprintf("user_copy_arg_list failed %d\n", err);
	return err;
}
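
// Illustrative only: how a syscall handler is expected to pair these two
// helpers (hypothetical caller, not from this file):
//
//	char **kargs;
//	int err = user_copy_arg_list(uargs, argc, &kargs);
//	if(err < 0)
//		return err;	// kargs is NULL on failure
//	// ... use kargs[0..argc-1]; kargs[argc] is NULL ...
//	free_arg_list(kargs, argc);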

static unsigned int thread_struct_hash(void *_t, const void *_key, unsigned int range)
{
	struct thread *t = _t;
	const struct thread_key *key = _key;

	if(t != NULL)
		return (t->id % range);
	else
		return (key->id % range);
}
static struct thread *create_thread_struct(const char *name)
{
	struct thread *t;
	int state;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();
	t = thread_dequeue(&dead_q);
	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	if(t == NULL) {
		t = (struct thread *)kmalloc(sizeof(struct thread));
		if(t == NULL)
			goto err;
	}

	strncpy(&t->name[0], name, SYS_MAX_OS_NAME_LEN-1);
	t->name[SYS_MAX_OS_NAME_LEN-1] = 0;

	t->id = atomic_add(&next_thread_id, 1);
	t->proc = NULL;
	t->cpu = NULL;
	t->sem_blocking = -1;
	t->fault_handler = 0;
	t->kernel_stack_region_id = -1;
	t->kernel_stack_base = 0;
	t->user_stack_region_id = -1;
	t->user_stack_base = 0;
	t->proc_next = NULL;
	t->q_next = NULL;
	t->priority = -1;
	t->args = NULL;
	t->pending_signals = SIG_NONE;
	t->in_kernel = true;
	t->user_time = 0;
	t->kernel_time = 0;
	t->last_time = 0;

	{
		char temp[64];

		sprintf(temp, "thread_0x%x_retcode_sem", t->id);
		t->return_code_sem = sem_create(0, temp);
		if(t->return_code_sem < 0)
			goto err1;
	}

	if(arch_thread_init_thread_struct(t) < 0)
		goto err2;

	return t;

err2:
	sem_delete_etc(t->return_code_sem, -1);
err1:
	kfree(t);
err:
	return NULL;
}
static void delete_thread_struct(struct thread *t)
{
	if(t->return_code_sem >= 0)
		sem_delete_etc(t->return_code_sem, -1);
	kfree(t);
}

static int _create_user_thread_kentry(void)
{
	struct thread *t;

	t = thread_get_current_thread();

	// a signal may have been delivered here
	thread_atkernel_exit();

	// jump to the entry point in user space
	arch_thread_enter_uspace((addr)t->entry, t->args, t->user_stack_base + STACK_SIZE);

	// never gets here
	return 0;
}

static int _create_kernel_thread_kentry(void)
{
	int (*func)(void *args);
	struct thread *t;

	t = thread_get_current_thread();

	// call the entry function with the appropriate args
	func = (void *)t->entry;

	return func(t->args);
}
static thread_id _create_thread(const char *name, proc_id pid, addr entry, void *args, bool kernel)
{
	struct thread *t;
	struct proc *p;
	int state;
	char stack_name[64];
	bool abort = false;

	t = create_thread_struct(name);
	if(t == NULL)
		return ERR_NO_MEMORY;

	t->priority = THREAD_MEDIUM_PRIORITY;
	t->state = THREAD_STATE_BIRTH;
	t->next_state = THREAD_STATE_SUSPENDED;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	// insert into global list
	hash_insert(thread_hash, t);
	RELEASE_THREAD_LOCK();

	GRAB_PROC_LOCK();
	// look at the proc, make sure it's not being deleted
	p = proc_get_proc_struct_locked(pid);
	if(p != NULL && p->state != PROC_STATE_DEATH) {
		insert_thread_into_proc(p, t);
	} else {
		abort = true;
	}
	RELEASE_PROC_LOCK();
	if(abort) {
		GRAB_THREAD_LOCK();
		hash_remove(thread_hash, t);
		RELEASE_THREAD_LOCK();
	}
	int_restore_interrupts(state);
	if(abort) {
		delete_thread_struct(t);
		return ERR_TASK_PROC_DELETED;
	}

	sprintf(stack_name, "%s_kstack", name);
	t->kernel_stack_region_id = vm_create_anonymous_region(vm_get_kernel_aspace_id(), stack_name,
		(void **)&t->kernel_stack_base, REGION_ADDR_ANY_ADDRESS, KSTACK_SIZE,
		REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
	if(t->kernel_stack_region_id < 0)
		panic("_create_thread: error creating kernel stack!\n");

	t->args = args;
	t->entry = entry;

	if(kernel) {
		// this sets up an initial kthread stack that runs the entry
		arch_thread_initialize_kthread_stack(t, &_create_kernel_thread_kentry, &thread_entry, &thread_kthread_exit);
	} else {
		// create user stack
		// XXX make this better. For now just keep trying to create a stack
		// until we find a spot.
		t->user_stack_base = (USER_STACK_REGION - STACK_SIZE) + USER_STACK_REGION_SIZE;
		while(t->user_stack_base > USER_STACK_REGION) {
			sprintf(stack_name, "%s_stack%d", p->name, t->id);
			t->user_stack_region_id = vm_create_anonymous_region(p->aspace_id, stack_name,
				(void **)&t->user_stack_base,
				REGION_ADDR_ANY_ADDRESS, STACK_SIZE, REGION_WIRING_LAZY, LOCK_RW);
			if(t->user_stack_region_id < 0) {
				t->user_stack_base -= STACK_SIZE;
			} else {
				// we created a region
				break;
			}
		}
		if(t->user_stack_region_id < 0)
			panic("_create_thread: unable to create user stack!\n");

		// copy the user entry over to the args field in the thread struct
		// the function this will call will immediately switch the thread into
		// user space.
		arch_thread_initialize_kthread_stack(t, &_create_user_thread_kentry, &thread_entry, &thread_kthread_exit);
	}

	t->state = THREAD_STATE_SUSPENDED;

	return t->id;
}
thread_id user_thread_create_user_thread(char *uname, proc_id pid, addr entry, void *args)
{
	char name[SYS_MAX_OS_NAME_LEN];
	int rc;

	if((addr)uname >= KERNEL_BASE && (addr)uname <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;
	if(entry >= KERNEL_BASE && entry <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
	if(rc < 0)
		return rc;
	name[SYS_MAX_OS_NAME_LEN-1] = 0;

	return thread_create_user_thread(name, pid, entry, args);
}

thread_id thread_create_user_thread(char *name, proc_id pid, addr entry, void *args)
{
	return _create_thread(name, pid, entry, args, false);
}

thread_id thread_create_kernel_thread(const char *name, int (*func)(void *), void *args)
{
	return _create_thread(name, proc_get_kernel_proc()->id, (addr)func, args, true);
}

static thread_id thread_create_kernel_thread_etc(const char *name, int (*func)(void *), void *args, struct proc *p)
{
	return _create_thread(name, p->id, (addr)func, args, true);
}
int thread_suspend_thread(thread_id id)
{
	int state;
	struct thread *t;
	int retval;
	bool global_resched = false;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_current_thread();
	if(t->id != id) {
		t = thread_get_thread_struct_locked(id);
	}

	if(t != NULL) {
		if(t->proc == kernel_proc) {
			// no way
			retval = ERR_NOT_ALLOWED;
		} else {
			if(t->in_kernel == true) {
				t->pending_signals |= SIG_SUSPEND;
			} else {
				t->next_state = THREAD_STATE_SUSPENDED;
				global_resched = true;
			}
			retval = NO_ERROR;
		}
	} else {
		retval = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	if(global_resched) {
		smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
	}

	return retval;
}
int thread_resume_thread(thread_id id)
{
	int state;
	struct thread *t;
	int retval;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(t != NULL && t->state == THREAD_STATE_SUSPENDED) {
		t->state = THREAD_STATE_READY;
		t->next_state = THREAD_STATE_READY;

		thread_enqueue_run_q(t);
		retval = NO_ERROR;
	} else {
		retval = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	return retval;
}
int thread_set_priority(thread_id id, int priority)
{
	struct thread *t;
	int retval;

	// make sure the passed-in priority is within bounds
	if(priority > THREAD_MAX_PRIORITY)
		priority = THREAD_MAX_PRIORITY;
	if(priority < THREAD_MIN_PRIORITY)
		priority = THREAD_MIN_PRIORITY;

	t = thread_get_current_thread();
	if(t->id == id) {
		// it's ourself, so we know we aren't in a run queue, and we can manipulate
		// our structure directly
		t->priority = priority;
		retval = NO_ERROR;
	} else {
		int state = int_disable_interrupts();
		GRAB_THREAD_LOCK();

		t = thread_get_thread_struct_locked(id);
		if(t) {
			if(t->state == THREAD_STATE_READY && t->priority != priority) {
				// this thread is in a ready queue right now, so it needs to be reinserted
				thread_dequeue_id(&run_q[t->priority], t->id);
				t->priority = priority;
				thread_enqueue_run_q(t);
			} else {
				t->priority = priority;
			}
			retval = NO_ERROR;
		} else {
			retval = ERR_INVALID_HANDLE;
		}

		RELEASE_THREAD_LOCK();
		int_restore_interrupts(state);
	}

	return retval;
}
static void _dump_proc_info(struct proc *p)
{
	dprintf("PROC: %p\n", p);
	dprintf("id: 0x%x\n", p->id);
	dprintf("name: '%s'\n", p->name);
	dprintf("next: %p\n", p->next);
	dprintf("num_threads: %d\n", p->num_threads);
	dprintf("state: %d\n", p->state);
	dprintf("pending_signals: 0x%x\n", p->pending_signals);
	dprintf("ioctx: %p\n", p->ioctx);
	dprintf("aspace_id: 0x%x\n", p->aspace_id);
	dprintf("aspace: %p\n", p->aspace);
	dprintf("kaspace: %p\n", p->kaspace);
	dprintf("main_thread: %p\n", p->main_thread);
	dprintf("thread_list: %p\n", p->thread_list);
}
static void dump_proc_info(int argc, char **argv)
{
	struct proc *p;
	int id = -1;
	unsigned long num;
	struct hash_iterator i;

	if(argc < 2) {
		dprintf("proc: not enough arguments\n");
		return;
	}

	// if the argument looks like a hex number, treat it as such
	if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
		num = atoul(argv[1]);
		if(num > vm_get_kernel_aspace()->virtual_map.base) {
			// XXX semi-hack
			_dump_proc_info((struct proc *)num);
			return;
		} else {
			id = num;
		}
	}

	// walk through the process list, trying to match name or id
	hash_open(proc_hash, &i);
	while((p = hash_next(proc_hash, &i)) != NULL) {
		if((p->name && strcmp(argv[1], p->name) == 0) || p->id == id) {
			_dump_proc_info(p);
			break;
		}
	}
	hash_close(proc_hash, &i, false);
}
static const char *state_to_text(int state)
{
	switch(state) {
		case THREAD_STATE_READY:
			return "READY";
		case THREAD_STATE_RUNNING:
			return "RUNNING";
		case THREAD_STATE_WAITING:
			return "WAITING";
		case THREAD_STATE_SUSPENDED:
			return "SUSPEND";
		case THREAD_STATE_FREE_ON_RESCHED:
			return "DEATH";
		case THREAD_STATE_BIRTH:
			return "BIRTH";
		default:
			return "UNKNOWN";
	}
}

static struct thread *last_thread_dumped = NULL;
static void _dump_thread_info(struct thread *t)
{
	dprintf("THREAD: %p\n", t);
	dprintf("id: 0x%x\n", t->id);
	dprintf("name: '%s'\n", t->name);
	dprintf("all_next: %p\nproc_next: %p\nq_next: %p\n",
		t->all_next, t->proc_next, t->q_next);
	dprintf("priority: 0x%x\n", t->priority);
	dprintf("state: %s\n", state_to_text(t->state));
	dprintf("next_state: %s\n", state_to_text(t->next_state));
	dprintf("cpu: %p ", t->cpu);
	if(t->cpu)
		dprintf("(%d)\n", t->cpu->info.cpu_num);
	else
		dprintf("\n");
	dprintf("pending_signals: 0x%x\n", t->pending_signals);
	dprintf("in_kernel: %d\n", t->in_kernel);
	dprintf("sem_blocking: 0x%x\n", t->sem_blocking);
	dprintf("sem_count: 0x%x\n", t->sem_count);
	dprintf("sem_deleted_retcode: 0x%x\n", t->sem_deleted_retcode);
	dprintf("sem_errcode: 0x%x\n", t->sem_errcode);
	dprintf("sem_flags: 0x%x\n", t->sem_flags);
	dprintf("fault_handler: 0x%lx\n", t->fault_handler);
	dprintf("args: %p\n", t->args);
	dprintf("entry: 0x%lx\n", t->entry);
	dprintf("proc: %p\n", t->proc);
	dprintf("return_code_sem: 0x%x\n", t->return_code_sem);
	dprintf("kernel_stack_region_id: 0x%x\n", t->kernel_stack_region_id);
	dprintf("kernel_stack_base: 0x%lx\n", t->kernel_stack_base);
	dprintf("user_stack_region_id: 0x%x\n", t->user_stack_region_id);
	dprintf("user_stack_base: 0x%lx\n", t->user_stack_base);
	dprintf("kernel_time: %Ld\n", t->kernel_time);
	dprintf("user_time: %Ld\n", t->user_time);
	dprintf("architecture dependent section:\n");
	arch_thread_dump_info(&t->arch_info);

	last_thread_dumped = t;
}
static void dump_thread_info(int argc, char **argv)
{
	struct thread *t;
	int id = -1;
	unsigned long num;
	struct hash_iterator i;

	if(argc < 2) {
		dprintf("thread: not enough arguments\n");
		return;
	}

	// if the argument looks like a hex number, treat it as such
	if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
		num = atoul(argv[1]);
		if(num > vm_get_kernel_aspace()->virtual_map.base) {
			// XXX semi-hack
			_dump_thread_info((struct thread *)num);
			return;
		} else {
			id = num;
		}
	}

	// walk through the thread list, trying to match name or id
	hash_open(thread_hash, &i);
	while((t = hash_next(thread_hash, &i)) != NULL) {
		if((t->name && strcmp(argv[1], t->name) == 0) || t->id == id) {
			_dump_thread_info(t);
			break;
		}
	}
	hash_close(thread_hash, &i, false);
}
static void dump_thread_list(int argc, char **argv)
{
	struct thread *t;
	struct hash_iterator i;

	hash_open(thread_hash, &i);
	while((t = hash_next(thread_hash, &i)) != NULL) {
		dprintf("%p", t);
		if(t->name != NULL)
			dprintf("\t%32s", t->name);
		else
			dprintf("\t%32s", "<NULL>");
		dprintf("\t0x%x", t->id);
		dprintf("\t%16s", state_to_text(t->state));
		if(t->cpu)
			dprintf("\t%d", t->cpu->info.cpu_num);
		else
			dprintf("\tNOCPU");
		dprintf("\t0x%lx\n", t->kernel_stack_base);
	}
	hash_close(thread_hash, &i, false);
}
static void dump_next_thread_in_q(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in queue after thread @ %p\n", t);
	if(t->q_next != NULL) {
		_dump_thread_info(t->q_next);
	} else {
		dprintf("NULL\n");
	}
}

static void dump_next_thread_in_all_list(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in global list after thread @ %p\n", t);
	if(t->all_next != NULL) {
		_dump_thread_info(t->all_next);
	} else {
		dprintf("NULL\n");
	}
}

static void dump_next_thread_in_proc(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in proc after thread @ %p\n", t);
	if(t->proc_next != NULL) {
		_dump_thread_info(t->proc_next);
	} else {
		dprintf("NULL\n");
	}
}
static int get_death_stack(void)
{
	int i;
	unsigned int bit;
	int state;

	sem_acquire(death_stack_sem, 1);

	// grab the thread lock, find a free spot, and release it
	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();
	// isolate the lowest *clear* bit of the bitmap (the first free slot)
	bit = death_stack_bitmap;
	bit = (~bit) & ~((~bit) - 1);
	death_stack_bitmap |= bit;
	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	// sanity checks
	if(!bit) {
		panic("get_death_stack: couldn't find free stack!\n");
	}
	if(bit & (bit - 1)) {
		panic("get_death_stack: impossible bitmap result!\n");
	}

	// bit to number
	i = -1;
	while(bit) {
		bit >>= 1;
		i += 1;
	}

	// dprintf("get_death_stack: returning 0x%lx\n", death_stacks[i].address);

	return i;
}
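
// Illustrative only: a worked example of the free-slot isolation above.
// With bitmap = 0b1011 (slots 0, 1 and 3 in use), ~bitmap = ...0100, and
// (~bitmap) & ~((~bitmap) - 1) = 0b0100: the lowest clear bit, i.e. free
// slot index 2. The while loop then converts that power of two into the
// index by counting shifts.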

static void put_death_stack_and_reschedule(unsigned int index)
{
	// dprintf("put_death_stack...: passed %d\n", index);

	if(index >= num_death_stacks)
		panic("put_death_stack: passed invalid stack index %d\n", index);

	if(!(death_stack_bitmap & (1 << index)))
		panic("put_death_stack: passed invalid stack index %d\n", index);

	// interrupts stay disabled and the thread lock stays held: this thread
	// is about to be rescheduled away for the last time
	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	death_stack_bitmap &= ~(1 << index);

	sem_release_etc(death_stack_sem, 1, SEM_FLAG_NO_RESCHED);

	thread_resched();
}
int thread_init(kernel_args *ka)
{
	struct thread *t;
	unsigned int i;

	// dprintf("thread_init: entry\n");

	// create the process hash table
	// (the subtraction only computes the offset of the 'next' field; kernel_proc is not dereferenced)
	proc_hash = hash_init(15, (addr)&kernel_proc->next - (addr)kernel_proc,
		&proc_struct_compare, &proc_struct_hash);

	// create the kernel process
	kernel_proc = create_proc_struct("kernel_proc", true);
	if(kernel_proc == NULL)
		panic("could not create kernel proc!\n");
	kernel_proc->state = PROC_STATE_NORMAL;

	kernel_proc->ioctx = vfs_new_ioctx(NULL);
	if(kernel_proc->ioctx == NULL)
		panic("could not create ioctx for kernel proc!\n");

	// stick it in the process hash
	hash_insert(proc_hash, kernel_proc);

	// create the thread hash table
	// (again, only the offset of 'all_next' is computed; t is never dereferenced here)
	thread_hash = hash_init(15, (addr)&t->all_next - (addr)t,
		&thread_struct_compare, &thread_struct_hash);

	// zero out the run queues
	memset(run_q, 0, sizeof(run_q));

	// zero out the dead thread structure q
	memset(&dead_q, 0, sizeof(dead_q));

	// allocate a snooze sem
	snooze_sem = sem_create(0, "snooze sem");
	if(snooze_sem < 0) {
		panic("error creating snooze sem\n");
		return snooze_sem;
	}

	// create an idle thread for each cpu
	for(i = 0; i < ka->num_cpus; i++) {
		char temp[64];
		vm_region *region;

		sprintf(temp, "idle_thread%d", i);
		t = create_thread_struct(temp);
		if(t == NULL) {
			panic("error creating idle thread struct\n");
			return ERR_NO_MEMORY;
		}
		t->proc = proc_get_kernel_proc();
		t->priority = THREAD_IDLE_PRIORITY;
		t->state = THREAD_STATE_RUNNING;
		t->next_state = THREAD_STATE_READY;
		sprintf(temp, "idle_thread%d_kstack", i);
		t->kernel_stack_region_id = vm_find_region_by_name(vm_get_kernel_aspace_id(), temp);
		region = vm_get_region_by_id(t->kernel_stack_region_id);
		if(!region) {
			panic("error finding idle kstack region\n");
		}
		t->kernel_stack_base = region->base;
		vm_put_region(region);
		hash_insert(thread_hash, t);
		insert_thread_into_proc(t->proc, t);
		idle_threads[i] = t;
		if(i == 0)
			arch_thread_set_current_thread(t);
		t->cpu = &cpu[i];
	}

	// create a set of death stacks
	num_death_stacks = smp_get_num_cpus();
	if(num_death_stacks > 8*sizeof(death_stack_bitmap)) {
		/*
		 * clamp values for really beefy machines
		 */
		num_death_stacks = 8*sizeof(death_stack_bitmap);
	}
	death_stack_bitmap = 0;
	death_stacks = (struct death_stack *)kmalloc(num_death_stacks * sizeof(struct death_stack));
	if(death_stacks == NULL) {
		panic("error creating death stacks\n");
		return ERR_NO_MEMORY;
	}
	{
		char temp[64];

		for(i = 0; i < num_death_stacks; i++) {
			sprintf(temp, "death_stack%d", i);
			death_stacks[i].rid = vm_create_anonymous_region(vm_get_kernel_aspace_id(), temp,
				(void **)&death_stacks[i].address,
				REGION_ADDR_ANY_ADDRESS, KSTACK_SIZE, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
			if(death_stacks[i].rid < 0) {
				panic("error creating death stacks\n");
				return death_stacks[i].rid;
			}
			death_stacks[i].in_use = false;
		}
	}
	death_stack_sem = sem_create(num_death_stacks, "death_stack_noavail_sem");

	// set up some debugger commands
	dbg_add_command(dump_thread_list, "threads", "list all threads");
	dbg_add_command(dump_thread_info, "thread", "list info about a particular thread");
	dbg_add_command(dump_next_thread_in_q, "next_q", "dump the next thread in the queue of last thread viewed");
	dbg_add_command(dump_next_thread_in_all_list, "next_all", "dump the next thread in the global list of the last thread viewed");
	dbg_add_command(dump_next_thread_in_proc, "next_proc", "dump the next thread in the process of the last thread viewed");
	dbg_add_command(dump_proc_info, "proc", "list info about a particular process");

	return 0;
}
int thread_init_percpu(int cpu_num)
{
	arch_thread_set_current_thread(idle_threads[cpu_num]);
	return 0;
}

// this starts the scheduler. Must be run under the context of
// the initial idle thread.
void thread_start_threading(void)
{
	int state;

	// XXX may not be the best place for this
	// invalidate all of the other processors' TLB caches
	state = int_disable_interrupts();
	arch_cpu_global_TLB_invalidate();
	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
	int_restore_interrupts(state);

	// start the other processors
	smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	thread_resched();

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);
}
int user_thread_snooze(bigtime_t time)
{
	thread_snooze(time);
	return NO_ERROR;
}

void thread_snooze(bigtime_t time)
{
	sem_acquire_etc(snooze_sem, 1, SEM_FLAG_TIMEOUT, time, NULL);
}

// this function gets run by a new thread before anything else
static void thread_entry(void)
{
	// simulates the thread spinlock release that would occur if the thread had been
	// rescheduled from. The resched didn't happen because the thread is new.
	RELEASE_THREAD_LOCK();
	int_enable_interrupts(); // this essentially simulates a return-from-interrupt
}
// used to pass messages between thread_exit and thread_exit2
struct thread_exit_args {
	struct thread *t;
	region_id old_kernel_stack;
	int int_state;
	unsigned int death_stack;
};

static void thread_exit2(void *_args)
{
	struct thread_exit_args args;

	// copy the arguments over, since the source is probably on the kernel stack we're about to delete
	memcpy(&args, _args, sizeof(struct thread_exit_args));

	// restore the interrupts
	int_restore_interrupts(args.int_state);

	// dprintf("thread_exit2, running on death stack 0x%lx\n", args.t->kernel_stack_base);

	// delete the old kernel stack region
	// dprintf("thread_exit2: deleting old kernel stack id 0x%x for thread 0x%x\n", args.old_kernel_stack, args.t->id);
	vm_delete_region(vm_get_kernel_aspace_id(), args.old_kernel_stack);

	// dprintf("thread_exit2: removing thread 0x%x from global lists\n", args.t->id);

	// remove this thread from all of the global lists
	int_disable_interrupts();
	GRAB_PROC_LOCK();
	remove_thread_from_proc(kernel_proc, args.t);
	RELEASE_PROC_LOCK();
	GRAB_THREAD_LOCK();
	hash_remove(thread_hash, args.t);
	RELEASE_THREAD_LOCK();

	// dprintf("thread_exit2: done removing thread from lists\n");

	// set the next state to be gone. Will return the thread structure to a ready pool upon reschedule
	args.t->next_state = THREAD_STATE_FREE_ON_RESCHED;

	// return the death stack and reschedule one last time
	put_death_stack_and_reschedule(args.death_stack);
	// never gets here
	panic("thread_exit2: made it where it shouldn't have!\n");
}
void thread_exit(int retcode)
{
	int state;
	struct thread *t = thread_get_current_thread();
	struct proc *p = t->proc;
	bool delete_proc = false;
	unsigned int death_stack;

	dprintf("thread 0x%x exiting w/return code 0x%x\n", t->id, retcode);

	// boost our priority to get this over with
	thread_set_priority(t->id, THREAD_HIGH_PRIORITY);

	// delete the user stack region first
	if(p->aspace_id >= 0 && t->user_stack_region_id >= 0) {
		region_id rid = t->user_stack_region_id;
		t->user_stack_region_id = -1;
		vm_delete_region(p->aspace_id, rid);
	}

	if(p != kernel_proc) {
		// remove this thread from the current process and
		// put it into the kernel proc until it dies
		state = int_disable_interrupts();
		GRAB_PROC_LOCK();
		remove_thread_from_proc(p, t);
		insert_thread_into_proc(kernel_proc, t);
		if(p->main_thread == t) {
			// this was the main thread in this process
			delete_proc = true;
			hash_remove(proc_hash, p);
			p->state = PROC_STATE_DEATH;
		}
		RELEASE_PROC_LOCK();
		// swap address spaces, to make sure we're running on the kernel's pgdir
		vm_aspace_swap(kernel_proc->kaspace);
		int_restore_interrupts(state);

		// dprintf("thread_exit: thread 0x%x now a kernel thread!\n", t->id);
	}

	// delete the process
	if(delete_proc) {
		if(p->num_threads > 0) {
			// there are other threads still in this process,
			// cycle through and signal kill on each of the threads
			// XXX this can be optimized. There's got to be a better solution.
			struct thread *temp_thread;

			state = int_disable_interrupts();
			GRAB_PROC_LOCK();
			// we can safely walk the list because of the lock. no new threads can be created
			// because of the PROC_STATE_DEATH flag on the process
			temp_thread = p->thread_list;
			while(temp_thread) {
				struct thread *next = temp_thread->proc_next;
				thread_kill_thread_nowait(temp_thread->id);
				temp_thread = next;
			}
			RELEASE_PROC_LOCK();
			int_restore_interrupts(state);

			// Now wait for all of the threads to die
			// XXX block on a semaphore
			while((volatile int)p->num_threads > 0) {
				thread_snooze(10000); // 10 ms
			}
		}
		vm_put_aspace(p->aspace);
		vm_delete_aspace(p->aspace_id);
		port_delete_owned_ports(p->id);
		sem_delete_owned_sems(p->id);
		vfs_free_ioctx(p->ioctx);
		kfree(p);
	}

	// delete the sem that others will use to wait on us and get the retcode
	{
		sem_id s = t->return_code_sem;

		t->return_code_sem = -1;
		sem_delete_etc(s, retcode);
	}

	death_stack = get_death_stack();
	{
		struct thread_exit_args args;

		args.t = t;
		args.old_kernel_stack = t->kernel_stack_region_id;
		args.death_stack = death_stack;

		// disable the interrupts. They must remain disabled until the kernel stack pointer can be officially switched
		args.int_state = int_disable_interrupts();

		// officially set the kernel stack to the death stack; it won't really be switched until
		// the next function is called. This bookkeeping must be done now, before a context switch
		// happens, or the processor will take an interrupt on the old stack
		t->kernel_stack_region_id = death_stacks[death_stack].rid;
		t->kernel_stack_base = death_stacks[death_stack].address;

		// we will continue in thread_exit2(), on the new stack
		arch_thread_switch_kstack_and_call(t, t->kernel_stack_base + KSTACK_SIZE, thread_exit2, &args);
	}

	panic("never can get here\n");
}
static int _thread_kill_thread(thread_id id, bool wait_on)
{
	int state;
	struct thread *t;
	int rc;

	// dprintf("_thread_kill_thread: id %d, wait_on %d\n", id, wait_on);

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(t != NULL) {
		if(t->proc == kernel_proc) {
			// can't touch this
			rc = ERR_NOT_ALLOWED;
		} else {
			deliver_signal(t, SIG_KILL);
			rc = NO_ERROR;
			if(t->id == thread_get_current_thread()->id)
				wait_on = false; // can't wait on ourself
		}
	} else {
		rc = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);
	if(rc < 0)
		return rc;

	if(wait_on)
		thread_wait_on_thread(id, NULL);

	return rc;
}

int thread_kill_thread(thread_id id)
{
	return _thread_kill_thread(id, true);
}

int thread_kill_thread_nowait(thread_id id)
{
	return _thread_kill_thread(id, false);
}

static void thread_kthread_exit(void)
{
	thread_exit(0);
}
int user_thread_wait_on_thread(thread_id id, int *uretcode)
{
	int retcode;
	int rc, rc2;

	if((addr)uretcode >= KERNEL_BASE && (addr)uretcode <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	rc = thread_wait_on_thread(id, &retcode);
	if(rc < 0)
		return rc;

	rc2 = user_memcpy(uretcode, &retcode, sizeof(retcode));
	if(rc2 < 0)
		return rc2;

	return rc;
}
int thread_wait_on_thread(thread_id id, int *retcode)
{
	sem_id sem;
	int state;
	struct thread *t;
	int rc;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(t != NULL) {
		sem = t->return_code_sem;
	} else {
		sem = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	rc = sem_acquire_etc(sem, 1, 0, 0, retcode);

	/* This thread died the way it should, don't ripple a non-error up */
	if(rc == ERR_SEM_DELETED)
		rc = NO_ERROR;

	return rc;
}
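
// Illustrative only: a typical caller (hypothetical, not from this file):
//
//	int retcode;
//	if(thread_wait_on_thread(tid, &retcode) == NO_ERROR)
//		dprintf("thread %d exited with 0x%x\n", tid, retcode);
//
// The exiting thread publishes retcode by deleting its return_code_sem with
// sem_delete_etc(sem, retcode) in thread_exit(), which wakes all waiters.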

int user_proc_wait_on_proc(proc_id id, int *uretcode)
{
	int retcode;
	int rc, rc2;

	if((addr)uretcode >= KERNEL_BASE && (addr)uretcode <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	rc = proc_wait_on_proc(id, &retcode);
	if(rc < 0)
		return rc;

	rc2 = user_memcpy(uretcode, &retcode, sizeof(retcode));
	if(rc2 < 0)
		return rc2;

	return rc;
}

int proc_wait_on_proc(proc_id id, int *retcode)
{
	struct proc *p;
	thread_id tid;
	int state;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();
	p = proc_get_proc_struct_locked(id);
	if(p && p->main_thread) {
		tid = p->main_thread->id;
	} else {
		tid = ERR_INVALID_HANDLE;
	}
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	if(tid < 0)
		return tid;

	return thread_wait_on_thread(tid, retcode);
}
struct thread *thread_get_thread_struct(thread_id id)
{
	struct thread *t;
	int state;

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);

	return t;
}

static struct thread *thread_get_thread_struct_locked(thread_id id)
{
	struct thread_key key;

	key.id = id;

	return hash_lookup(thread_hash, &key);
}

static struct proc *proc_get_proc_struct(proc_id id)
{
	struct proc *p;
	int state;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(id);

	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	return p;
}

static struct proc *proc_get_proc_struct_locked(proc_id id)
{
	struct proc_key key;

	key.id = id;

	return hash_lookup(proc_hash, &key);
}
static void thread_context_switch(struct thread *t_from, struct thread *t_to)
{
	bigtime_t now;

	// track kernel time
	now = system_time();
	t_from->kernel_time += now - t_from->last_time;
	t_to->last_time = now;

	t_to->cpu = t_from->cpu;
	arch_thread_set_current_thread(t_to);
	t_from->cpu = NULL;
	arch_thread_context_switch(t_from, t_to);
}
static int _rand(void)
{
	static int next = 0;

	if(next == 0)
		next = system_time();

	// standard linear congruential generator (the classic ANSI C example
	// constants); returns a pseudo-random value in [0, 0x7FFF]
	next = next * 1103515245 + 12345;
	return((next >> 16) & 0x7FFF);
}

static int reschedule_event(void *unused)
{
	// this function is called as a result of the timer event set by the scheduler;
	// returning INT_RESCHEDULE causes a reschedule on the way out of the interrupt
	thread_get_current_thread()->cpu->info.preempted = 1;
	return INT_RESCHEDULE;
}
// NOTE: expects thread_spinlock to be held
void thread_resched(void)
{
	struct thread *next_thread = NULL;
	int last_thread_pri = -1;
	struct thread *old_thread = thread_get_current_thread();
	int i;
	bigtime_t quantum;
	struct timer_event *quantum_timer;

	// dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), thread_get_current_thread());

	switch(old_thread->next_state) {
		case THREAD_STATE_RUNNING:
		case THREAD_STATE_READY:
			// dprintf("enqueueing thread 0x%x into run q. pri = %d\n", old_thread, old_thread->priority);
			thread_enqueue_run_q(old_thread);
			break;
		case THREAD_STATE_SUSPENDED:
			dprintf("suspending thread 0x%x\n", old_thread->id);
			break;
		case THREAD_STATE_FREE_ON_RESCHED:
			thread_enqueue(old_thread, &dead_q);
			break;
		default:
			// dprintf("not enqueueing thread 0x%x into run q. next_state = %d\n", old_thread, old_thread->next_state);
			;
	}
	old_thread->state = old_thread->next_state;

	// search the real-time queue
	for(i = THREAD_MAX_RT_PRIORITY; i >= THREAD_MIN_RT_PRIORITY; i--) {
		next_thread = thread_dequeue_run_q(i);
		if(next_thread)
			goto found_thread;
	}

	// search the regular queue
	for(i = THREAD_MAX_PRIORITY; i > THREAD_IDLE_PRIORITY; i--) {
		next_thread = thread_lookat_run_q(i);
		if(next_thread != NULL) {
			// skip it sometimes (_rand() <= 0x3000, roughly 3 times in 8)
			// so that lower-priority threads also get a chance to run
			if(_rand() > 0x3000) {
				next_thread = thread_dequeue_run_q(i);
				goto found_thread;
			}
			last_thread_pri = i;
			next_thread = NULL;
		}
	}
	if(next_thread == NULL) {
		if(last_thread_pri != -1) {
			next_thread = thread_dequeue_run_q(last_thread_pri);
			if(next_thread == NULL)
				panic("next_thread == NULL! last_thread_pri = %d\n", last_thread_pri);
		} else {
			next_thread = thread_dequeue_run_q(THREAD_IDLE_PRIORITY);
			if(next_thread == NULL)
				panic("next_thread == NULL! no idle priorities!\n");
		}
	}

found_thread:
	next_thread->state = THREAD_STATE_RUNNING;
	next_thread->next_state = THREAD_STATE_READY;

	// XXX should only reset the quantum timer if we are switching to a new thread,
	// or we got here as a result of a quantum expire.

	// XXX calculate quantum
	quantum = 10000;

	// get the quantum timer for this cpu
	quantum_timer = &old_thread->cpu->info.quantum_timer;
	if(!old_thread->cpu->info.preempted) {
		_local_timer_cancel_event(old_thread->cpu->info.cpu_num, quantum_timer);
	}
	old_thread->cpu->info.preempted = 0;
	timer_setup_timer(&reschedule_event, NULL, quantum_timer);
	timer_set_event(quantum, TIMER_MODE_ONESHOT, quantum_timer);

	if(next_thread != old_thread) {
		// dprintf("thread_resched: cpu %d switching from thread %d to %d\n",
		//	smp_get_current_cpu(), old_thread->id, next_thread->id);
		thread_context_switch(old_thread, next_thread);
	}
}
static int proc_struct_compare(void *_p, const void *_key)
{
	struct proc *p = _p;
	const struct proc_key *key = _key;

	if(p->id == key->id) return 0;
	else return 1;
}

static unsigned int proc_struct_hash(void *_p, const void *_key, unsigned int range)
{
	struct proc *p = _p;
	const struct proc_key *key = _key;

	if(p != NULL)
		return (p->id % range);
	else
		return (key->id % range);
}

struct proc *proc_get_kernel_proc(void)
{
	return kernel_proc;
}

proc_id proc_get_kernel_proc_id(void)
{
	if(!kernel_proc)
		return 0;
	else
		return kernel_proc->id;
}

proc_id proc_get_current_proc_id(void)
{
	return thread_get_current_thread()->proc->id;
}
static struct proc *create_proc_struct(const char *name, bool kernel)
{
	struct proc *p;

	p = (struct proc *)kmalloc(sizeof(struct proc));
	if(p == NULL)
		goto error;
	p->id = atomic_add(&next_proc_id, 1);
	strncpy(&p->name[0], name, SYS_MAX_OS_NAME_LEN-1);
	p->name[SYS_MAX_OS_NAME_LEN-1] = 0;
	p->num_threads = 0;
	p->ioctx = NULL;
	p->aspace_id = -1;
	p->aspace = NULL;
	p->kaspace = vm_get_kernel_aspace();
	vm_put_aspace(p->kaspace);
	p->thread_list = NULL;
	p->main_thread = NULL;
	p->state = PROC_STATE_BIRTH;
	p->pending_signals = SIG_NONE;

	if(arch_proc_init_proc_struct(p, kernel) < 0)
		goto error1;

	return p;

error1:
	kfree(p);
error:
	return NULL;
}

static void delete_proc_struct(struct proc *p)
{
	kfree(p);
}
static int get_arguments_data_size(char **args, int argc)
{
	int cnt;
	int tot_size = 0;

	for(cnt = 0; cnt < argc; cnt++)
		tot_size += strlen(args[cnt]) + 1;
	tot_size += (argc + 1) * sizeof(char *);

	return tot_size + sizeof(struct uspace_prog_args_t);
}
static int proc_create_proc2(void *args)
{
	int err;
	struct thread *t;
	struct proc *p;
	struct proc_arg *pargs = args;
	char *path;
	addr entry;
	char ustack_name[128];
	int tot_top_size;
	char **uargs;
	char *udest;
	struct uspace_prog_args_t *uspa;
	unsigned int cnt;

	t = thread_get_current_thread();
	p = t->proc;

	dprintf("proc_create_proc2: entry thread %d\n", t->id);

	// create an initial primary stack region
	tot_top_size = STACK_SIZE + PAGE_ALIGN(get_arguments_data_size(pargs->args, pargs->argc));
	t->user_stack_base = ((USER_STACK_REGION - tot_top_size) + USER_STACK_REGION_SIZE);
	sprintf(ustack_name, "%s_primary_stack", p->name);
	t->user_stack_region_id = vm_create_anonymous_region(p->aspace_id, ustack_name, (void **)&t->user_stack_base,
		REGION_ADDR_EXACT_ADDRESS, tot_top_size, REGION_WIRING_LAZY, LOCK_RW);
	if(t->user_stack_region_id < 0) {
		panic("proc_create_proc2: could not create default user stack region\n");
		return t->user_stack_region_id;
	}

	// lay out the program arguments above the stack
	uspa = (struct uspace_prog_args_t *)(t->user_stack_base + STACK_SIZE);
	uargs = (char **)(uspa + 1);
	udest = (char *)(uargs + pargs->argc + 1);
	// dprintf("addr: stack base=0x%x uargs = 0x%x udest=0x%x tot_top_size=%d\n\n", t->user_stack_base, uargs, udest, tot_top_size);

	for(cnt = 0; cnt < pargs->argc; cnt++) {
		uargs[cnt] = udest;
		user_strcpy(udest, pargs->args[cnt]);
		udest += strlen(pargs->args[cnt]) + 1;
	}
	uargs[cnt] = NULL;

	user_memcpy(uspa->prog_name, p->name, sizeof(uspa->prog_name));
	user_memcpy(uspa->prog_path, pargs->path, sizeof(uspa->prog_path));
	uspa->argc = cnt;
	uspa->argv = uargs;
	uspa->envc = 0;
	uspa->envp = 0;

	if(pargs->args != NULL)
		free_arg_list(pargs->args, pargs->argc);

	path = pargs->path;
	dprintf("proc_create_proc2: loading elf binary '%s'\n", path);

	err = elf_load_uspace("/boot/libexec/rld.so", p, 0, &entry);
	if(err < 0){
		// XXX clean up proc
		return err;
	}

	// free the args
	kfree(pargs->path);
	kfree(pargs);

	dprintf("proc_create_proc2: loaded elf. entry = 0x%lx\n", entry);

	p->state = PROC_STATE_NORMAL;

	// jump to the entry point in user space
	arch_thread_enter_uspace(entry, uspa, t->user_stack_base + STACK_SIZE);

	// never gets here
	return 0;
}
proc_id proc_create_proc(const char *path, const char *name, char **args, int argc, int priority)
{
	struct proc *p;
	thread_id tid;
	proc_id pid;
	int err;
	unsigned int state;
	struct proc_arg *pargs;

	dprintf("proc_create_proc: entry '%s', name '%s' args = %p argc = %d\n", path, name, args, argc);

	p = create_proc_struct(name, false);
	if(p == NULL)
		return ERR_NO_MEMORY;

	pid = p->id;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();
	hash_insert(proc_hash, p);
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	// copy the args over
	pargs = kmalloc(sizeof(struct proc_arg));
	if(pargs == NULL){
		err = ERR_NO_MEMORY;
		goto err1;
	}
	pargs->path = kstrdup(path);
	if(pargs->path == NULL){
		err = ERR_NO_MEMORY;
		goto err2;
	}
	pargs->argc = argc;
	pargs->args = args;

	// create a new ioctx for this process
	p->ioctx = vfs_new_ioctx(thread_get_current_thread()->proc->ioctx);
	if(!p->ioctx) {
		err = ERR_NO_MEMORY;
		goto err3;
	}

	// create an address space for this process
	p->aspace_id = vm_create_aspace(p->name, USER_BASE, USER_SIZE, false);
	if(p->aspace_id < 0) {
		err = p->aspace_id;
		goto err4;
	}
	p->aspace = vm_get_aspace_by_id(p->aspace_id);

	// create a kernel thread, but under the context of the new process
	tid = thread_create_kernel_thread_etc(name, proc_create_proc2, pargs, p);
	if(tid < 0) {
		err = tid;
		goto err5;
	}

	thread_resume_thread(tid);

	return pid;

err5:
	vm_put_aspace(p->aspace);
	vm_delete_aspace(p->aspace_id);
err4:
	vfs_free_ioctx(p->ioctx);
err3:
	kfree(pargs->path);
err2:
	kfree(pargs);
err1:
	// remove the proc structure from the proc hash table and delete the proc structure
	state = int_disable_interrupts();
	GRAB_PROC_LOCK();
	hash_remove(proc_hash, p);
	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);
	delete_proc_struct(p);
	return err;
}
proc_id user_proc_create_proc(const char *upath, const char *uname, char **args, int argc, int priority)
{
	char path[SYS_MAX_PATH_LEN];
	char name[SYS_MAX_OS_NAME_LEN];
	char **kargs;
	int rc;

	dprintf("user_proc_create_proc: argc = %d\n", argc);

	if((addr)upath >= KERNEL_BASE && (addr)upath <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;
	if((addr)uname >= KERNEL_BASE && (addr)uname <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	rc = user_copy_arg_list(args, argc, &kargs);
	if(rc < 0)
		goto error;

	rc = user_strncpy(path, upath, SYS_MAX_PATH_LEN-1);
	if(rc < 0)
		goto error;
	path[SYS_MAX_PATH_LEN-1] = 0;

	rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
	if(rc < 0)
		goto error;
	name[SYS_MAX_OS_NAME_LEN-1] = 0;

	return proc_create_proc(path, name, kargs, argc, priority);
error:
	free_arg_list(kargs, argc);
	return rc;
}

// used by the ps command and anything else interested in a process list
int user_proc_get_table(struct proc_info *pbuf, size_t len)
{
	struct proc *p;
	struct hash_iterator i;
	struct proc_info pi;
	int state;
	int count = 0;
	int max = (len / sizeof(struct proc_info));

	if((addr)pbuf >= KERNEL_BASE && (addr)pbuf <= KERNEL_TOP)
		return ERR_VM_BAD_USER_MEMORY;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();

	hash_open(proc_hash, &i);
	while(((p = hash_next(proc_hash, &i)) != NULL) && (count < max)) {
		pi.id = p->id;
		strcpy(pi.name, p->name);
		pi.state = p->state;
		pi.num_threads = p->num_threads;
		count++;
		user_memcpy(pbuf, &pi, sizeof(struct proc_info));
		pbuf++; // struct-pointer arithmetic already advances by sizeof(struct proc_info)
	}
	hash_close(proc_hash, &i, false);

	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);

	if(count < max)
		return count;
	else
		return ERR_NO_MEMORY;
}
int proc_kill_proc(proc_id id)
{
	int state;
	struct proc *p;
	thread_id tid = -1;
	int retval = 0;

	state = int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(id);
	if(p != NULL) {
		tid = p->main_thread->id;
	} else {
		retval = ERR_INVALID_HANDLE;
	}

	RELEASE_PROC_LOCK();
	int_restore_interrupts(state);
	if(retval < 0)
		return retval;

	// just kill the main thread in the process. The cleanup code there will
	// take care of the process
	return thread_kill_thread(tid);
}
// sets the pending signal flag on a thread and possibly does some work to wake it up, etc.
// expects the thread lock to be held
static void deliver_signal(struct thread *t, int signal)
{
	// dprintf("deliver_signal: thread %p (%d), signal %d\n", t, t->id, signal);
	switch(signal) {
		case SIG_KILL:
			t->pending_signals |= SIG_KILL;
			switch(t->state) {
				case THREAD_STATE_SUSPENDED:
					t->state = THREAD_STATE_READY;
					t->next_state = THREAD_STATE_READY;

					thread_enqueue_run_q(t);
					break;
				case THREAD_STATE_WAITING:
					sem_interrupt_thread(t);
					break;
				default:
					;
			}
			break;
		default:
			t->pending_signals |= signal;
	}
}
// expects the thread lock to be held
static void _check_for_thread_sigs(struct thread *t, int state)
{
	if(t->pending_signals == SIG_NONE)
		return;

	if(t->pending_signals & SIG_KILL) {
		t->pending_signals &= ~SIG_KILL;

		RELEASE_THREAD_LOCK();
		int_restore_interrupts(state);
		thread_exit(0);
		// never gets to here
	}
	if(t->pending_signals & SIG_SUSPEND) {
		t->pending_signals &= ~SIG_SUSPEND;
		t->next_state = THREAD_STATE_SUSPENDED;
		// XXX will probably want to delay this
		thread_resched();
	}
}
// called in the int handler code when a thread enters the kernel for any reason
void thread_atkernel_entry(void)
{
	int state;
	struct thread *t;
	bigtime_t now;

	// dprintf("thread_atkernel_entry: entry thread 0x%x\n", t->id);

	t = thread_get_current_thread();

	state = int_disable_interrupts();

	// track user time
	now = system_time();
	t->user_time += now - t->last_time;
	t->last_time = now;

	GRAB_THREAD_LOCK();

	t->in_kernel = true;

	_check_for_thread_sigs(t, state);

	RELEASE_THREAD_LOCK();
	int_restore_interrupts(state);
}

// called when a thread exits kernel space to user space
void thread_atkernel_exit(void)
{
	int state;
	struct thread *t;
	bigtime_t now;

	// dprintf("thread_atkernel_exit: entry\n");

	t = thread_get_current_thread();

	state = int_disable_interrupts();
	GRAB_THREAD_LOCK();

	_check_for_thread_sigs(t, state);

	t->in_kernel = false;

	RELEASE_THREAD_LOCK();

	// track kernel time
	now = system_time();
	t->kernel_time += now - t->last_time;
	t->last_time = now;

	int_restore_interrupts(state);
}
int user_getrlimit(int resource, struct rlimit * urlp)
{
	int ret;
	struct rlimit rl;

	if (urlp == NULL) {
		return ERR_INVALID_ARGS;
	}
	if((addr)urlp >= KERNEL_BASE && (addr)urlp <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	ret = getrlimit(resource, &rl);

	if (ret == 0) {
		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
		if (ret < 0) {
			return ret;
		}
		return 0;
	}

	return ret;
}

int getrlimit(int resource, struct rlimit * rlp)
{
	if (!rlp) {
		return -1;
	}

	switch(resource) {
		case RLIMIT_NOFILE:
			return vfs_getrlimit(resource, rlp);

		default:
			return -1;
	}

	return 0;
}

int user_setrlimit(int resource, const struct rlimit * urlp)
{
	int err;
	struct rlimit rl;

	if (urlp == NULL) {
		return ERR_INVALID_ARGS;
	}
	if((addr)urlp >= KERNEL_BASE && (addr)urlp <= KERNEL_TOP) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err = user_memcpy(&rl, urlp, sizeof(struct rlimit));
	if (err < 0) {
		return err;
	}

	return setrlimit(resource, &rl);
}

int setrlimit(int resource, const struct rlimit * rlp)
{
	if (!rlp) {
		return -1;
	}

	switch(resource) {
		case RLIMIT_NOFILE:
			return vfs_setrlimit(resource, rlp);

		default:
			return -1;
	}

	return 0;
}