Working on a kernel elf loader.
[newos.git] / kernel / thread.c
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/debug.h>
#include <kernel/console.h>
#include <kernel/thread.h>
#include <kernel/arch/thread.h>
#include <kernel/khash.h>
#include <kernel/int.h>
#include <kernel/smp.h>
#include <kernel/timer.h>
#include <kernel/arch/cpu.h>
#include <kernel/arch/int.h>
#include <kernel/sem.h>
#include <kernel/vfs.h>
#include <kernel/elf.h>
#include <kernel/heap.h>
#include <sys/errors.h>
#include <boot/stage2.h>
#include <libc/string.h>
#include <libc/printf.h>

struct proc_key {
    proc_id id;
};

struct thread_key {
    thread_id id;
};

static struct proc *create_proc_struct(const char *name, bool kernel);
static int proc_struct_compare(void *_p, void *_key);
static unsigned int proc_struct_hash(void *_p, void *_key, int range);

// global
spinlock_t thread_spinlock = 0;

// proc list
static void *proc_hash = NULL;
static struct proc *kernel_proc = NULL;
static proc_id next_proc_id = 0;
static spinlock_t proc_spinlock = 0;
// NOTE: PROC lock can be held over a THREAD lock acquisition,
// but not the other way (to avoid deadlock)
#define GRAB_PROC_LOCK() acquire_spinlock(&proc_spinlock)
#define RELEASE_PROC_LOCK() release_spinlock(&proc_spinlock)
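// Sketch of the only legal nesting order implied by the NOTE above
// (the proc lock may wrap a thread lock acquisition, never the reverse):
//
//     state = int_disable_interrupts();
//     GRAB_PROC_LOCK();
//     ...
//     GRAB_THREAD_LOCK();
//     ...
//     RELEASE_THREAD_LOCK();
//     RELEASE_PROC_LOCK();
//     int_restore_interrupts(state);
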
// scheduling timer
#define LOCAL_CPU_TIMER timers[smp_get_current_cpu()]
static struct timer_event *timers = NULL;

// thread list
#define CURR_THREAD cur_thread[smp_get_current_cpu()]
static struct thread **cur_thread = NULL;
static void *thread_hash = NULL;
static thread_id next_thread_id = 0;

static sem_id snooze_sem = -1;

// death stacks
// used temporarily as a thread cleans itself up
struct death_stack {
    region_id rid;
    addr address;
    bool in_use;
};
static struct death_stack *death_stacks;
static unsigned int num_death_stacks;
static unsigned int num_free_death_stacks;
static sem_id death_stack_sem;
static spinlock_t death_stack_spinlock;

// thread queues
static struct thread_queue run_q[THREAD_NUM_PRIORITY_LEVELS] = { { NULL, NULL }, };
static struct thread_queue dead_q;

static int _rand();
static void thread_entry(void);
static struct thread *thread_get_thread_struct_locked(thread_id id);
static struct proc *proc_get_proc_struct(proc_id id);
static struct proc *proc_get_proc_struct_locked(proc_id id);
static void thread_kthread_exit();
static void deliver_signal(struct thread *t, int signal);

// insert a thread onto the tail of a queue
void thread_enqueue(struct thread *t, struct thread_queue *q)
{
    t->q_next = NULL;
    if(q->head == NULL) {
        q->head = t;
        q->tail = t;
    } else {
        q->tail->q_next = t;
        q->tail = t;
    }
}

struct thread *thread_lookat_queue(struct thread_queue *q)
{
    return q->head;
}

struct thread *thread_dequeue(struct thread_queue *q)
{
    struct thread *t;

    t = q->head;
    if(t != NULL) {
        q->head = t->q_next;
        if(q->tail == t)
            q->tail = NULL;
    }
    return t;
}

struct thread *thread_dequeue_id(struct thread_queue *q, thread_id thr_id)
{
    struct thread *t;
    struct thread *last = NULL;

    t = q->head;
    while(t != NULL) {
        if(t->id == thr_id) {
            if(last == NULL) {
                q->head = t->q_next;
            } else {
                last->q_next = t->q_next;
            }
            if(q->tail == t)
                q->tail = last;
            break;
        }
        last = t;
        t = t->q_next;
    }
    return t;
}
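/*
 * Usage sketch for the queue primitives above (callers in this file hold
 * the appropriate lock, e.g. GRAB_THREAD_LOCK(), around all of these):
 *
 *     thread_enqueue(t, &dead_q);              // push onto the tail
 *     t = thread_lookat_queue(&dead_q);        // peek at the head
 *     t = thread_dequeue(&dead_q);             // pop the head (or NULL)
 *     t = thread_dequeue_id(&dead_q, some_id); // unlink by thread id
 *
 * (some_id is a hypothetical thread_id used only for illustration.)
 */
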
struct thread *thread_lookat_run_q(int priority)
{
    return thread_lookat_queue(&run_q[priority]);
}

void thread_enqueue_run_q(struct thread *t)
{
    // these shouldn't exist
    if(t->priority > THREAD_MAX_PRIORITY)
        t->priority = THREAD_MAX_PRIORITY;
    if(t->priority < 0)
        t->priority = 0;

    thread_enqueue(t, &run_q[t->priority]);
}

struct thread *thread_dequeue_run_q(int priority)
{
    return thread_dequeue(&run_q[priority]);
}

static void insert_thread_into_proc(struct proc *p, struct thread *t)
{
    t->proc_next = p->thread_list;
    p->thread_list = t;
    p->num_threads++;
    if(p->num_threads == 1) {
        // this was the first thread
        p->main_thread = t;
    }
    t->proc = p;
}

static void remove_thread_from_proc(struct proc *p, struct thread *t)
{
    struct thread *temp, *last = NULL;

    for(temp = p->thread_list; temp != NULL; temp = temp->proc_next) {
        if(temp == t) {
            if(last == NULL) {
                p->thread_list = temp->proc_next;
            } else {
                last->proc_next = temp->proc_next;
            }
            p->num_threads--;
            break;
        }
        last = temp;
    }
}

static int thread_struct_compare(void *_t, void *_key)
{
    struct thread *t = _t;
    struct thread_key *key = _key;

    if(t->id == key->id) return 0;
    else return 1;
}

static unsigned int thread_struct_hash(void *_t, void *_key, int range)
{
    struct thread *t = _t;
    struct thread_key *key = _key;

    if(t != NULL)
        return (t->id % range);
    else
        return (key->id % range);
}
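/*
 * Note on the compare/hash pair (inferred from the NULL check above): the
 * hash table calls the hash function with either a real element
 * (_t != NULL) or just a lookup key (_t == NULL plus a struct thread_key),
 * so both paths must hash a given id to the same bucket.
 */
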
static struct thread *create_thread_struct(const char *name)
{
    struct thread *t;
    int state;

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();
    t = thread_dequeue(&dead_q);
    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);

    if(t == NULL) {
        t = (struct thread *)kmalloc(sizeof(struct thread));
        if(t == NULL)
            goto err;
    }

    t->name = (char *)kmalloc(strlen(name) + 1);
    if(t->name == NULL)
        goto err1;
    strcpy(t->name, name);
    t->id = atomic_add(&next_thread_id, 1);
    t->proc = NULL;
    t->sem_blocking = -1;
    t->fault_handler = 0;
    t->kernel_stack_region_id = -1;
    t->kernel_stack_base = 0;
    t->user_stack_region_id = -1;
    t->user_stack_base = 0;
    t->proc_next = NULL;
    t->q_next = NULL;
    t->priority = -1;
    t->args = NULL;
    t->pending_signals = SIG_NONE;
    t->in_kernel = true;
    {
        char temp[64];

        sprintf(temp, "thread_0x%x_retcode_sem", t->id);
        t->return_code_sem = sem_create(0, temp);
        if(t->return_code_sem < 0)
            goto err2;
    }

    if(arch_thread_init_thread_struct(t) < 0)
        goto err3;

    return t;

err3:
    sem_delete_etc(t->return_code_sem, -1);
err2:
    kfree(t->name);
err1:
    kfree(t);
err:
    return NULL;
}

static int _create_user_thread_kentry(void)
{
    struct thread *t;

    t = thread_get_current_thread();

    // a signal may have been delivered here
    thread_atkernel_exit();

    // jump to the entry point in user space
    arch_thread_enter_uspace((addr)t->args, t->user_stack_base + STACK_SIZE);

    // never get here
    return 0;
}

static thread_id _create_thread(const char *name, proc_id pid, int priority, addr entry, bool kernel)
{
    struct thread *t;
    struct proc *p;
    int state;
    char stack_name[64];
    bool abort = false;

    t = create_thread_struct(name);
    if(t == NULL)
        return ERR_NO_MEMORY;

    t->priority = priority;
    t->state = THREAD_STATE_BIRTH;
    t->next_state = THREAD_STATE_SUSPENDED;

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    // insert into global list
    hash_insert(thread_hash, t);
    RELEASE_THREAD_LOCK();

    GRAB_PROC_LOCK();
    // look at the proc, make sure it's not being deleted
    p = proc_get_proc_struct_locked(pid);
    if(p != NULL && p->state != PROC_STATE_DEATH) {
        insert_thread_into_proc(p, t);
    } else {
        abort = true;
    }
    RELEASE_PROC_LOCK();
    if(abort) {
        GRAB_THREAD_LOCK();
        hash_remove(thread_hash, t);
        RELEASE_THREAD_LOCK();
    }
    int_restore_interrupts(state);
    if(abort) {
        kfree(t->name);
        kfree(t);
        return ERR_TASK_PROC_DELETED;
    }

    sprintf(stack_name, "%s_kstack", name);
    t->kernel_stack_region_id = vm_create_anonymous_region(vm_get_kernel_aspace_id(), stack_name,
        (void **)&t->kernel_stack_base, REGION_ADDR_ANY_ADDRESS, KSTACK_SIZE,
        REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
    if(t->kernel_stack_region_id < 0)
        panic("_create_thread: error creating kernel stack!\n");

    if(kernel) {
        // this sets up an initial kthread stack that runs the entry
        arch_thread_initialize_kthread_stack(t, (void *)entry, &thread_entry, &thread_kthread_exit);
    } else {
        // create user stack
        // XXX make this better. For now just keep trying to create a stack
        // until we find a spot.
        t->user_stack_base = (USER_STACK_REGION - STACK_SIZE) + USER_STACK_REGION_SIZE;
        while(t->user_stack_base > USER_STACK_REGION) {
            sprintf(stack_name, "%s_stack%d", p->name, t->id);
            // the walk-down retry loop only makes sense with an exact
            // address request, so ask for this specific spot each pass
            t->user_stack_region_id = vm_create_anonymous_region(p->aspace_id, stack_name,
                (void **)&t->user_stack_base,
                REGION_ADDR_EXACT_ADDRESS, STACK_SIZE, REGION_WIRING_LAZY, LOCK_RW);
            if(t->user_stack_region_id < 0) {
                t->user_stack_base -= STACK_SIZE;
            } else {
                // we created a region
                break;
            }
        }
        if(t->user_stack_region_id < 0)
            panic("_create_thread: unable to create user stack!\n");

        // copy the user entry over to the args field in the thread struct
        // the function this will call will immediately switch the thread into
        // user space.
        t->args = (void *)entry;
        arch_thread_initialize_kthread_stack(t, &_create_user_thread_kentry, &thread_entry, &thread_kthread_exit);
    }

    t->state = THREAD_STATE_SUSPENDED;

    return t->id;
}

thread_id user_thread_create_user_thread(char *uname, proc_id pid, int priority, addr entry)
{
    char name[SYS_MAX_OS_NAME_LEN];
    int rc;

    if((addr)uname >= KERNEL_BASE && (addr)uname <= KERNEL_TOP)
        return ERR_VM_BAD_USER_MEMORY;
    if(entry >= KERNEL_BASE && entry <= KERNEL_TOP)
        return ERR_VM_BAD_USER_MEMORY;

    rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
    if(rc < 0)
        return rc;
    name[SYS_MAX_OS_NAME_LEN-1] = 0;

    return thread_create_user_thread(name, pid, priority, entry);
}

thread_id thread_create_user_thread(char *name, proc_id pid, int priority, addr entry)
{
    return _create_thread(name, pid, priority, entry, false);
}

thread_id thread_create_kernel_thread(const char *name, int (*func)(void), int priority)
{
    return _create_thread(name, proc_get_kernel_proc()->id, priority, (addr)func, true);
}
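/*
 * Usage sketch (worker_func is a hypothetical int (*)(void)): new threads
 * are created in THREAD_STATE_SUSPENDED, so creation is normally paired
 * with a resume, as the test code at the bottom of this file does:
 *
 *     thread_id tid = thread_create_kernel_thread("worker", &worker_func, 5);
 *     if(tid >= 0)
 *         thread_resume_thread(tid);
 */
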
static thread_id thread_create_kernel_thread_etc(const char *name, int (*func)(void), int priority, struct proc *p)
{
    return _create_thread(name, p->id, priority, (addr)func, true);
}

int thread_suspend_thread(thread_id id)
{
    int state;
    struct thread *t;
    int retval;
    bool global_resched = false;

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    if(CURR_THREAD->id == id) {
        t = CURR_THREAD;
    } else {
        t = thread_get_thread_struct_locked(id);
    }

    if(t != NULL) {
        if(t->proc == kernel_proc) {
            // no way
            retval = ERR_NOT_ALLOWED;
        } else {
            if(t->in_kernel == true) {
                t->pending_signals |= SIG_SUSPEND;
            } else {
                t->next_state = THREAD_STATE_SUSPENDED;
                global_resched = true;
            }
            retval = NO_ERROR;
        }
    } else {
        retval = ERR_INVALID_HANDLE;
    }

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);

    if(global_resched) {
        smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
    }

    return retval;
}

int thread_resume_thread(thread_id id)
{
    int state;
    struct thread *t;
    int retval;

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    t = thread_get_thread_struct_locked(id);
    if(t != NULL && t->state == THREAD_STATE_SUSPENDED) {
        t->state = THREAD_STATE_READY;
        t->next_state = THREAD_STATE_READY;

        thread_enqueue_run_q(t);
        retval = NO_ERROR;
    } else {
        retval = ERR_INVALID_HANDLE;
    }

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);

    return retval;
}

static void _dump_proc_info(struct proc *p)
{
    dprintf("PROC: 0x%x\n", p);
    dprintf("id: 0x%x\n", p->id);
    dprintf("name: '%s'\n", p->name);
    dprintf("next: 0x%x\n", p->next);
    dprintf("num_threads: %d\n", p->num_threads);
    dprintf("state: %d\n", p->state);
    dprintf("pending_signals: 0x%x\n", p->pending_signals);
    dprintf("ioctx: 0x%x\n", p->ioctx);
    dprintf("args: 0x%x\n", p->args);
    dprintf("proc_creation_sem: 0x%x\n", p->proc_creation_sem);
    dprintf("aspace_id: 0x%x\n", p->aspace_id);
    dprintf("aspace: 0x%x\n", p->aspace);
    dprintf("kaspace: 0x%x\n", p->kaspace);
    dprintf("main_thread: 0x%x\n", p->main_thread);
    dprintf("thread_list: 0x%x\n", p->thread_list);
}

static void dump_proc_info(int argc, char **argv)
{
    struct proc *p;
    int id = -1;
    unsigned long num;
    struct hash_iterator i;

    if(argc < 2) {
        dprintf("proc: not enough arguments\n");
        return;
    }

    // if the argument looks like a hex number, treat it as such
    if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
        num = atoul(argv[1]);
        if(num > vm_get_kernel_aspace()->virtual_map.base) {
            // XXX semi-hack
            _dump_proc_info((struct proc *)num);
            return;
        } else {
            id = num;
        }
    }

    // walk through the process list, trying to match name or id
    hash_open(proc_hash, &i);
    while((p = hash_next(proc_hash, &i)) != NULL) {
        if((p->name && strcmp(argv[1], p->name) == 0) || p->id == id) {
            _dump_proc_info(p);
            break;
        }
    }
    hash_close(proc_hash, &i, false);
}

static const char *state_to_text(int state)
{
    switch(state) {
        case THREAD_STATE_READY:
            return "READY";
        case THREAD_STATE_RUNNING:
            return "RUNNING";
        case THREAD_STATE_WAITING:
            return "WAITING";
        case THREAD_STATE_SUSPENDED:
            return "SUSPEND";
        case THREAD_STATE_FREE_ON_RESCHED:
            return "DEATH";
        case THREAD_STATE_BIRTH:
            return "BIRTH";
        default:
            return "UNKNOWN";
    }
}

static struct thread *last_thread_dumped = NULL;

static void _dump_thread_info(struct thread *t)
{
    dprintf("THREAD: 0x%x\n", t);
    dprintf("id: 0x%x\n", t->id);
    dprintf("name: '%s'\n", t->name);
    dprintf("all_next: 0x%x\nproc_next: 0x%x\nq_next: 0x%x\n",
        t->all_next, t->proc_next, t->q_next);
    dprintf("priority: 0x%x\n", t->priority);
    dprintf("state: %s\n", state_to_text(t->state));
    dprintf("next_state: %s\n", state_to_text(t->next_state));
    dprintf("pending_signals: 0x%x\n", t->pending_signals);
    dprintf("in_kernel: %d\n", t->in_kernel);
    dprintf("sem_blocking: 0x%x\n", t->sem_blocking);
    dprintf("sem_count: 0x%x\n", t->sem_count);
    dprintf("sem_deleted_retcode: 0x%x\n", t->sem_deleted_retcode);
    dprintf("sem_errcode: 0x%x\n", t->sem_errcode);
    dprintf("args: 0x%x\n", t->args);
    dprintf("proc: 0x%x\n", t->proc);
    dprintf("return_code_sem: 0x%x\n", t->return_code_sem);
    dprintf("kernel_stack_region_id: 0x%x\n", t->kernel_stack_region_id);
    dprintf("kernel_stack_base: 0x%x\n", t->kernel_stack_base);
    dprintf("user_stack_region_id: 0x%x\n", t->user_stack_region_id);
    dprintf("user_stack_base: 0x%x\n", t->user_stack_base);
    dprintf("architecture dependent section:\n");
    arch_thread_dump_info(&t->arch_info);

    last_thread_dumped = t;
}

static void dump_thread_info(int argc, char **argv)
{
    struct thread *t;
    int id = -1;
    unsigned long num;
    struct hash_iterator i;

    if(argc < 2) {
        dprintf("thread: not enough arguments\n");
        return;
    }

    // if the argument looks like a hex number, treat it as such
    if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
        num = atoul(argv[1]);
        if(num > vm_get_kernel_aspace()->virtual_map.base) {
            // XXX semi-hack
            _dump_thread_info((struct thread *)num);
            return;
        } else {
            id = num;
        }
    }

    // walk through the thread list, trying to match name or id
    hash_open(thread_hash, &i);
    while((t = hash_next(thread_hash, &i)) != NULL) {
        if((t->name && strcmp(argv[1], t->name) == 0) || t->id == id) {
            _dump_thread_info(t);
            break;
        }
    }
    hash_close(thread_hash, &i, false);
}

static void dump_thread_list(int argc, char **argv)
{
    struct thread *t;
    struct hash_iterator i;

    hash_open(thread_hash, &i);
    while((t = hash_next(thread_hash, &i)) != NULL) {
        dprintf("0x%x", t);
        if(t->name != NULL)
            dprintf("\t%32s", t->name);
        else
            dprintf("\t%32s", "<NULL>");
        dprintf("\t0x%x", t->id);
        dprintf("\t%16s", state_to_text(t->state));
        dprintf("\t0x%x\n", t->kernel_stack_base);
    }
    hash_close(thread_hash, &i, false);
}

static void dump_next_thread_in_q(int argc, char **argv)
{
    struct thread *t = last_thread_dumped;

    if(t == NULL) {
        dprintf("no thread previously dumped. Examine a thread first.\n");
        return;
    }

    dprintf("next thread in queue after thread @ 0x%x\n", t);
    if(t->q_next != NULL) {
        _dump_thread_info(t->q_next);
    } else {
        dprintf("NULL\n");
    }
}

static void dump_next_thread_in_all_list(int argc, char **argv)
{
    struct thread *t = last_thread_dumped;

    if(t == NULL) {
        dprintf("no thread previously dumped. Examine a thread first.\n");
        return;
    }

    dprintf("next thread in global list after thread @ 0x%x\n", t);
    if(t->all_next != NULL) {
        _dump_thread_info(t->all_next);
    } else {
        dprintf("NULL\n");
    }
}

static void dump_next_thread_in_proc(int argc, char **argv)
{
    struct thread *t = last_thread_dumped;

    if(t == NULL) {
        dprintf("no thread previously dumped. Examine a thread first.\n");
        return;
    }

    dprintf("next thread in proc after thread @ 0x%x\n", t);
    if(t->proc_next != NULL) {
        _dump_thread_info(t->proc_next);
    } else {
        dprintf("NULL\n");
    }
}

int get_death_stack(void)
{
    unsigned int i;
    int state;

    sem_acquire(death_stack_sem, 1);

    // grab the thread lock around the search for a death stack to make sure it doesn't
    // find a death stack that has been returned by a thread that still hasn't been
    // rescheduled for the last time. Localized hack here and in put_death_stack_and_reschedule.
    state = int_disable_interrupts();
    acquire_spinlock(&death_stack_spinlock);
    GRAB_THREAD_LOCK();
    release_spinlock(&death_stack_spinlock);

    for(i = 0; i < num_death_stacks; i++) {
        if(death_stacks[i].in_use == false) {
            death_stacks[i].in_use = true;
            break;
        }
    }

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);

    if(i >= num_death_stacks) {
        panic("get_death_stack: couldn't find free stack!\n");
    }

    dprintf("get_death_stack: returning 0x%x\n", death_stacks[i].address);

    return i;
}

static void put_death_stack_and_reschedule(unsigned int index)
{
    dprintf("put_death_stack...: passed %d\n", index);

    if(index >= num_death_stacks || death_stacks[index].in_use == false)
        panic("put_death_stack_and_reschedule: passed invalid stack index %d\n", index);
    death_stacks[index].in_use = false;

    // disable the interrupts around the semaphore release to prevent the get_death_stack
    // function from allocating this stack before the reschedule. Kind of a hack, but it's
    // localized and there's no easy way around it.
    int_disable_interrupts();

    acquire_spinlock(&death_stack_spinlock);
    sem_release_etc(death_stack_sem, 1, SEM_FLAG_NO_RESCHED);

    GRAB_THREAD_LOCK();
    release_spinlock(&death_stack_spinlock);

    thread_resched();
}
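/*
 * Death-stack handoff, in brief: a dying thread acquires a free slot via
 * get_death_stack() (gated by death_stack_sem), migrates onto that stack
 * in thread_exit(), and finally releases the slot here with the reschedule
 * folded in, so the slot cannot be handed out again until this thread has
 * been switched away from for the last time.
 */
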
int thread_init(kernel_args *ka)
{
    struct thread *t;
    unsigned int i;

    dprintf("thread_init: entry\n");

    // create the process hash table
    // (the second argument is the byte offset of the link field within the
    // structure; kernel_proc is still NULL here and is only used for the
    // pointer arithmetic, never dereferenced)
    proc_hash = hash_init(15, (addr)&kernel_proc->next - (addr)kernel_proc,
        &proc_struct_compare, &proc_struct_hash);

    // create the kernel process
    kernel_proc = create_proc_struct("kernel_proc", true);
    if(kernel_proc == NULL)
        panic("could not create kernel proc!\n");
    kernel_proc->state = PROC_STATE_NORMAL;

    kernel_proc->ioctx = vfs_new_ioctx();
    if(kernel_proc->ioctx == NULL)
        panic("could not create ioctx for kernel proc!\n");

    // stick it in the process hash
    hash_insert(proc_hash, kernel_proc);

    // create the thread hash table
    // (same offset trick as above: t is only used for the arithmetic)
    thread_hash = hash_init(15, (addr)&t->all_next - (addr)t,
        &thread_struct_compare, &thread_struct_hash);

    // zero out the run queues
    memset(run_q, 0, sizeof(run_q));

    // zero out the dead thread structure q
    memset(&dead_q, 0, sizeof(dead_q));

    // allocate as many CURR_THREAD slots as there are cpus
    cur_thread = (struct thread **)kmalloc(sizeof(struct thread *) * smp_get_num_cpus());
    if(cur_thread == NULL) {
        panic("error allocating cur_thread slots\n");
        return ERR_NO_MEMORY;
    }
    memset(cur_thread, 0, sizeof(struct thread *) * smp_get_num_cpus());

    // allocate a timer structure per cpu
    timers = (struct timer_event *)kmalloc(sizeof(struct timer_event) * smp_get_num_cpus());
    if(timers == NULL) {
        panic("error allocating scheduling timers\n");
        return ERR_NO_MEMORY;
    }
    memset(timers, 0, sizeof(struct timer_event) * smp_get_num_cpus());

    // allocate a snooze sem
    snooze_sem = sem_create(0, "snooze sem");
    if(snooze_sem < 0) {
        panic("error creating snooze sem\n");
        return snooze_sem;
    }

    // create an idle thread for each cpu
    for(i = 0; i < ka->num_cpus; i++) {
        char temp[64];

        sprintf(temp, "idle_thread%d", i);
        t = create_thread_struct(temp);
        if(t == NULL) {
            panic("error creating idle thread struct\n");
            return ERR_NO_MEMORY;
        }
        t->proc = proc_get_kernel_proc();
        t->priority = THREAD_IDLE_PRIORITY;
        t->state = THREAD_STATE_RUNNING;
        t->next_state = THREAD_STATE_READY;
        sprintf(temp, "idle_thread%d_kstack", i);
        t->kernel_stack_region_id = vm_find_region_by_name(vm_get_kernel_aspace_id(), temp);
        hash_insert(thread_hash, t);
        insert_thread_into_proc(t->proc, t);
        cur_thread[i] = t;
    }

    // create a set of death stacks
    num_death_stacks = smp_get_num_cpus();
    num_free_death_stacks = smp_get_num_cpus();
    death_stacks = (struct death_stack *)kmalloc(num_death_stacks * sizeof(struct death_stack));
    if(death_stacks == NULL) {
        panic("error creating death stacks\n");
        return ERR_NO_MEMORY;
    }
    {
        char temp[64];

        for(i = 0; i < num_death_stacks; i++) {
            sprintf(temp, "death_stack%d", i);
            death_stacks[i].rid = vm_create_anonymous_region(vm_get_kernel_aspace_id(), temp,
                (void **)&death_stacks[i].address,
                REGION_ADDR_ANY_ADDRESS, KSTACK_SIZE, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
            if(death_stacks[i].rid < 0) {
                panic("error creating death stacks\n");
                return death_stacks[i].rid;
            }
            death_stacks[i].in_use = false;
        }
    }
    death_stack_sem = sem_create(num_death_stacks, "death_stack_noavail_sem");
    death_stack_spinlock = 0;

    // set up some debugger commands
    dbg_add_command(dump_thread_list, "threads", "list all threads");
    dbg_add_command(dump_thread_info, "thread", "list info about a particular thread");
    dbg_add_command(dump_next_thread_in_q, "next_q", "dump the next thread in the queue of last thread viewed");
    dbg_add_command(dump_next_thread_in_all_list, "next_all", "dump the next thread in the global list of the last thread viewed");
    dbg_add_command(dump_next_thread_in_proc, "next_proc", "dump the next thread in the process of the last thread viewed");
    dbg_add_command(dump_proc_info, "proc", "list info about a particular process");

    return 0;
}

// this starts the scheduler. Must be run under the context of
// the initial idle thread.
void thread_start_threading()
{
    int state;

    // XXX may not be the best place for this
    // invalidate all of the other processors' TLB caches
    smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
    arch_cpu_global_TLB_invalidate();

    // start the other processors
    smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    thread_resched();

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);
}

void thread_snooze(time_t time)
{
    sem_acquire_etc(snooze_sem, 1, SEM_FLAG_TIMEOUT, time, NULL);
}

// this function gets run by a new thread before anything else
static void thread_entry(void)
{
    // simulates the thread spinlock release that would occur if the thread had been
    // rescheduled from. The resched didn't happen because the thread is new.
    RELEASE_THREAD_LOCK();
    int_enable_interrupts(); // this essentially simulates a return-from-interrupt
}

// used to pass messages between thread_exit and thread_exit2
struct thread_exit_args {
    struct thread *t;
    region_id old_kernel_stack;
    int int_state;
    unsigned int death_stack;
};

static void thread_exit2(void *_args)
{
    struct thread_exit_args args;
    char *temp;

    // copy the arguments over, since the source is probably on the kernel stack we're about to delete
    memcpy(&args, _args, sizeof(struct thread_exit_args));

    // restore the interrupts
    int_restore_interrupts(args.int_state);

    dprintf("thread_exit2, running on death stack 0x%x\n", args.t->kernel_stack_base);

    // delete the old kernel stack region
    dprintf("thread_exit2: deleting old kernel stack id 0x%x for thread 0x%x\n", args.old_kernel_stack, args.t->id);
    vm_delete_region(vm_get_kernel_aspace_id(), args.old_kernel_stack);

    dprintf("thread_exit2: freeing name for thid 0x%x\n", args.t->id);

    // delete the name
    temp = args.t->name;
    args.t->name = NULL;
    if(temp != NULL)
        kfree(temp);

    dprintf("thread_exit2: removing thread 0x%x from global lists\n", args.t->id);

    // remove this thread from all of the global lists
    int_disable_interrupts();
    GRAB_PROC_LOCK();
    remove_thread_from_proc(kernel_proc, args.t);
    RELEASE_PROC_LOCK();
    GRAB_THREAD_LOCK();
    hash_remove(thread_hash, args.t);
    RELEASE_THREAD_LOCK();

    dprintf("thread_exit2: done removing thread from lists\n");

    // set the next state to be gone. Will return the thread structure to a ready pool upon reschedule
    args.t->next_state = THREAD_STATE_FREE_ON_RESCHED;

    // return the death stack and reschedule one last time
    put_death_stack_and_reschedule(args.death_stack);
    // never get to here
    panic("thread_exit2: made it where it shouldn't have!\n");
}

void thread_exit(int retcode)
{
    int state;
    struct thread *t = CURR_THREAD;
    struct proc *p = t->proc;
    bool delete_proc = false;
    unsigned int death_stack;

    dprintf("thread 0x%x exiting w/return code 0x%x\n", t->id, retcode);

    // delete the user stack region first
    if(p->aspace_id >= 0 && t->user_stack_region_id >= 0) {
        region_id rid = t->user_stack_region_id;
        t->user_stack_region_id = -1;
        vm_delete_region(p->aspace_id, rid);
    }

    if(p != kernel_proc) {
        // remove this thread from the current process and add it to the kernel
        // put the thread into the kernel proc until it dies
        state = int_disable_interrupts();
        GRAB_PROC_LOCK();
        remove_thread_from_proc(p, t);
        t->proc = kernel_proc;
        insert_thread_into_proc(kernel_proc, t);
        if(p->main_thread == t) {
            // this was the main thread in this process
            delete_proc = true;
            hash_remove(proc_hash, p);
            p->state = PROC_STATE_DEATH;
        }
        RELEASE_PROC_LOCK();
        GRAB_THREAD_LOCK();
        // reschedule, thus making sure this thread is running in the context of the kernel
        thread_resched();
        RELEASE_THREAD_LOCK();
        int_restore_interrupts(state);

        dprintf("thread_exit: thread 0x%x now a kernel thread!\n", t->id);
    }

    // delete the process
    if(delete_proc) {
        if(p->num_threads > 0) {
            // there are other threads still in this process,
            // cycle through and signal kill on each of the threads
            // XXX this can be optimized. There's got to be a better solution.
            struct thread *temp_thread;

            state = int_disable_interrupts();
            GRAB_PROC_LOCK();
            // we can safely walk the list because of the lock. no new threads can be created
            // because of the PROC_STATE_DEATH flag on the process
            for(temp_thread = p->thread_list; temp_thread; temp_thread = temp_thread->proc_next) {
                thread_kill_thread_nowait(temp_thread->id);
            }
            RELEASE_PROC_LOCK();
            int_restore_interrupts(state);

            // Now wait for all of the threads to die
            // XXX block on a semaphore
            while((volatile int)p->num_threads > 0) {
                thread_snooze(10000); // 10 ms
            }
        }
        vm_delete_aspace(p->aspace_id);
        vfs_free_ioctx(p->ioctx);
        kfree(p);
    }

    // delete the sem that others will use to wait on us and get the retcode
    {
        sem_id s = t->return_code_sem;

        t->return_code_sem = -1;
        sem_delete_etc(s, retcode);
    }

    death_stack = get_death_stack();
    {
        struct thread_exit_args args;

        args.t = t;
        args.old_kernel_stack = t->kernel_stack_region_id;
        args.death_stack = death_stack;

        // disable the interrupts. Must remain disabled until the kernel stack pointer can be officially switched
        args.int_state = int_disable_interrupts();

        // officially set the new kernel stack to the death stack; it won't really be switched until
        // the next function is called. This bookkeeping must be done now before a context switch
        // happens, or the processor will interrupt to the old stack
        t->kernel_stack_region_id = death_stacks[death_stack].rid;
        t->kernel_stack_base = death_stacks[death_stack].address;

        // we will continue in thread_exit2(), on the new stack
        arch_thread_switch_kstack_and_call(t, t->kernel_stack_base + KSTACK_SIZE, thread_exit2, &args);
    }

    panic("never can get here\n");
}
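/*
 * Summary of the exit sequence above: (1) free the user stack, (2) migrate
 * the thread into kernel_proc, (3) tear the process down if this was its
 * main thread, (4) delete return_code_sem with the exit code so waiters
 * wake up with it, then (5) switch onto a death stack so thread_exit2()
 * can delete the old kernel stack out from under itself.
 */
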
static int _thread_kill_thread(thread_id id, bool wait_on)
{
    int state;
    struct thread *t;
    int rc;

    dprintf("_thread_kill_thread: id %d, wait_on %d\n", id, wait_on);

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    t = thread_get_thread_struct_locked(id);
    if(t != NULL) {
        if(t->proc == kernel_proc) {
            // can't touch this
            rc = ERR_NOT_ALLOWED;
        } else {
            deliver_signal(t, SIG_KILL);
            rc = NO_ERROR;
            if(t->id == CURR_THREAD->id)
                wait_on = false; // can't wait on ourself
        }
    } else {
        rc = ERR_INVALID_HANDLE;
    }

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);
    if(rc < 0)
        return rc;

    if(wait_on)
        thread_wait_on_thread(id, NULL);

    return rc;
}

int thread_kill_thread(thread_id id)
{
    return _thread_kill_thread(id, true);
}

int thread_kill_thread_nowait(thread_id id)
{
    return _thread_kill_thread(id, false);
}

static void thread_kthread_exit()
{
    thread_exit(0);
}

int user_thread_wait_on_thread(thread_id id, int *uretcode)
{
    int retcode;
    int rc, rc2;

    if((addr)uretcode >= KERNEL_BASE && (addr)uretcode <= KERNEL_TOP)
        return ERR_VM_BAD_USER_MEMORY;

    rc = thread_wait_on_thread(id, &retcode);
    if(rc < 0)
        return rc;

    rc2 = user_memcpy(uretcode, &retcode, sizeof(retcode));
    if(rc2 < 0)
        return rc2;

    return rc;
}

int thread_wait_on_thread(thread_id id, int *retcode)
{
    sem_id sem;
    int state;
    struct thread *t;

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    t = thread_get_thread_struct_locked(id);
    if(t != NULL) {
        sem = t->return_code_sem;
    } else {
        sem = ERR_INVALID_HANDLE;
    }

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);

    return sem_acquire_etc(sem, 1, 0, 0, retcode);
}
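/*
 * Usage sketch: collecting a thread's exit code. The wait piggybacks on the
 * target's return_code_sem; thread_exit() deletes that semaphore with the
 * exit code, which wakes every waiter and hands them the code via the last
 * argument of sem_acquire_etc(). (tid is a hypothetical thread_id.)
 *
 *     int retcode;
 *     if(thread_wait_on_thread(tid, &retcode) >= 0)
 *         dprintf("thread exited with 0x%x\n", retcode);
 */
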
proc_id user_proc_wait_on_proc(proc_id id, int *uretcode)
{
    int retcode;
    int rc, rc2;

    if((addr)uretcode >= KERNEL_BASE && (addr)uretcode <= KERNEL_TOP)
        return ERR_VM_BAD_USER_MEMORY;

    rc = proc_wait_on_proc(id, &retcode);
    if(rc < 0)
        return rc;

    rc2 = user_memcpy(uretcode, &retcode, sizeof(retcode));
    if(rc2 < 0)
        return rc2;

    return rc;
}

int proc_wait_on_proc(proc_id id, int *retcode)
{
    struct proc *p;
    thread_id tid;
    int state;

    state = int_disable_interrupts();
    GRAB_PROC_LOCK();
    p = proc_get_proc_struct_locked(id);
    if(p && p->main_thread) {
        tid = p->main_thread->id;
    } else {
        tid = ERR_INVALID_HANDLE;
    }
    RELEASE_PROC_LOCK();
    int_restore_interrupts(state);

    return thread_wait_on_thread(tid, retcode);
}

thread_id thread_get_current_thread_id()
{
    if(cur_thread == NULL)
        return 0;
    return CURR_THREAD->id;
}

struct thread *thread_get_current_thread()
{
    if(cur_thread == NULL)
        return NULL;
    return CURR_THREAD;
}

struct thread *thread_get_thread_struct(thread_id id)
{
    struct thread *t;
    int state;

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    t = thread_get_thread_struct_locked(id);

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);

    return t;
}

static struct thread *thread_get_thread_struct_locked(thread_id id)
{
    struct thread_key key;

    key.id = id;

    return hash_lookup(thread_hash, &key);
}

static struct proc *proc_get_proc_struct(proc_id id)
{
    struct proc *p;
    int state;

    state = int_disable_interrupts();
    GRAB_PROC_LOCK();

    p = proc_get_proc_struct_locked(id);

    RELEASE_PROC_LOCK();
    int_restore_interrupts(state);

    return p;
}

static struct proc *proc_get_proc_struct_locked(proc_id id)
{
    struct proc_key key;

    key.id = id;

    return hash_lookup(proc_hash, &key);
}

static void thread_context_switch(struct thread *t_from, struct thread *t_to)
{
    arch_thread_context_switch(t_from, t_to);
}

#define NUM_TEST_THREADS 16
/* thread TEST code */
static sem_id thread_test_sems[NUM_TEST_THREADS];
static thread_id thread_test_first_thid;

int test_thread_starter_thread()
{
    thread_snooze(1000000); // wait a second

    // start the chain of threads by releasing one of them
    sem_release(thread_test_sems[0], 1);

    return 0;
}

int test_thread5()
{
    int fd;

    fd = sys_open("/bus/pci", STREAM_TYPE_DEVICE, 0);
    if(fd < 0) {
        dprintf("test_thread5: error opening /bus/pci\n");
        return 1;
    }

    sys_ioctl(fd, 99, NULL, 0);

    return 0;
}

int test_thread4()
{
    proc_id pid;

    pid = proc_create_proc("/boot/testapp", "testapp", 5);
    if(pid < 0)
        return -1;

    dprintf("test_thread4: finished creating new process\n");

    thread_snooze(1000000);

    // kill the process
//    proc_kill_proc(pid);

    return 0;
}

int test_thread3()
{
    int fd;
    char buf[1024];
    ssize_t len;

    kprintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
    fd = sys_open("/boot/testfile", STREAM_TYPE_FILE, 0);
    if(fd < 0)
        panic("could not open /boot/testfile\n");
    len = sys_read(fd, buf, 0, sizeof(buf));
    sys_write(0, buf, 0, len);
    sys_close(fd);

    return 0;
}

int test_thread2()
{
    while(1) {
        char str[65];
        ssize_t len;

        len = sys_read(0, str, 0, sizeof(str) - 1);
        if(len < 0) {
            dprintf("error reading from console!\n");
            break;
        }
        if(len > 1) dprintf("test_thread2: read %d bytes\n", len);
        str[len] = 0;
        kprintf("%s", str);
    }
    return 0;
}

int test_thread()
{
    int a = 0;
    int tid = thread_get_current_thread_id();
    int x, y;
//    char c = 'a';

    x = tid % 80;
    y = (tid / 80) * 2;

    while(1) {
//        a += tid;
        a++;
#if 1
        // note: system_time() presumably returns a 64-bit value that takes
        // up two varargs slots here, hence the two 0x%x specifiers for it
        kprintf_xy(0, tid-1, "thread%d - %d - 0x%x 0x%x - cpu %d", tid, a, system_time(), smp_get_current_cpu());
#endif
#if 0
        dprintf("thread%d - %d - %d %d - cpu %d\n", tid, a, system_time(), smp_get_current_cpu());
#endif
#if 0
        kprintf("thread%d - %d - %d %d - cpu %d\n", tid, a, system_time(), smp_get_current_cpu());
#endif
#if 0
        kprintf_xy(x, y, "%c", c++);
        if(c > 'z')
            c = 'a';
        kprintf_xy(x, y+1, "%d", smp_get_current_cpu());
#endif
#if 0
        thread_snooze(10000 * tid);
#endif
#if 0
        sem_acquire(thread_test_sems[tid - thread_test_first_thid], 1);

        // release the next semaphore
        {
            sem_id sem_to_release;

            sem_to_release = tid - thread_test_first_thid + 1;
            if(sem_to_release >= NUM_TEST_THREADS)
                sem_to_release = 0;
            sem_to_release = thread_test_sems[sem_to_release];
            sem_release(sem_to_release, 1);
        }
#endif
#if 0
        switch(tid - thread_test_first_thid) {
            case 2: case 4:
                if((a % 2048) == 0)
                    sem_release(thread_test_sem, _rand() % 16 + 1);
                break;
            default:
                sem_acquire(thread_test_sem, 1);
        }
#endif
#if 0
        if(a > tid * 100) {
            kprintf("thread %d exiting\n", tid);
            break;
        }
#endif
    }
    return 1;
}

int panic_thread()
{
    dprintf("panic thread starting\n");

    thread_snooze(10000000);
    panic("gotcha!\n");
    return 0;
}

int thread_test()
{
    thread_id tid;
    int i;
    char temp[64];

#if 1
    for(i=0; i<NUM_TEST_THREADS; i++) {
        sprintf(temp, "test_thread%d", i);
        tid = thread_create_kernel_thread(temp, &test_thread, 5);
        thread_resume_thread(tid);
        if(i == 0) {
            thread_test_first_thid = tid;
        }
        sprintf(temp, "test sem %d", i);
        thread_test_sems[i] = sem_create(0, temp);
    }
    tid = thread_create_kernel_thread("test starter thread", &test_thread_starter_thread, THREAD_MAX_PRIORITY);
    thread_resume_thread(tid);
#endif
#if 0
    tid = thread_create_kernel_thread("test thread 2", &test_thread2, 5);
    thread_resume_thread(tid);
#endif
#if 0
    tid = thread_create_kernel_thread("test thread 3", &test_thread3, 5);
    thread_resume_thread(tid);
#endif
#if 0
    tid = thread_create_kernel_thread("test thread 4", &test_thread4, 5);
    thread_resume_thread(tid);
#endif
#if 0
    tid = thread_create_kernel_thread("test thread 5", &test_thread5, 5);
    thread_resume_thread(tid);
#endif
#if 0
    tid = thread_create_kernel_thread("panic thread", &panic_thread, THREAD_MAX_PRIORITY);
    thread_resume_thread(tid);
#endif
    dprintf("thread_test: done creating test threads\n");

    return 0;
}

static int _rand()
{
    static int next = 0;

    if(next == 0)
        next = system_time();

    next = next * 1103515245 + 12345;
    return((next >> 16) & 0x7FFF);
}
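/*
 * Note: this is the classic ANSI C linear congruential generator
 * (multiplier 1103515245, increment 12345, top bits of a 15-bit result),
 * seeded lazily from system_time(). It is only used to add jitter to
 * scheduling decisions, so its poor statistical quality is fine here.
 */
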
static int reschedule_event(void *unused)
{
    // this function is called as a result of the timer event set by the scheduler
    // returning this causes a reschedule on the timer event
    return INT_RESCHEDULE;
}

// NOTE: expects thread_spinlock to be held
void thread_resched()
{
    struct thread *next_thread = NULL;
    int last_thread_pri = -1;
    struct thread *old_thread = CURR_THREAD;
    int i;
    time_t quantum;

//    dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), CURR_THREAD);

    switch(old_thread->next_state) {
        case THREAD_STATE_RUNNING:
        case THREAD_STATE_READY:
//            dprintf("enqueueing thread 0x%x into run q. pri = %d\n", old_thread, old_thread->priority);
            thread_enqueue_run_q(old_thread);
            break;
        case THREAD_STATE_SUSPENDED:
            dprintf("suspending thread 0x%x\n", old_thread->id);
            break;
        case THREAD_STATE_FREE_ON_RESCHED:
            thread_enqueue(old_thread, &dead_q);
            break;
        default:
//            dprintf("not enqueueing thread 0x%x into run q. next_state = %d\n", old_thread, old_thread->next_state);
            ;
    }
    old_thread->state = old_thread->next_state;

    for(i = THREAD_MAX_PRIORITY; i > THREAD_IDLE_PRIORITY; i--) {
        next_thread = thread_lookat_run_q(i);
        if(next_thread != NULL) {
            // skip it sometimes
            if(_rand() > 0x3000) {
                next_thread = thread_dequeue_run_q(i);
                break;
            }
            last_thread_pri = i;
            next_thread = NULL;
        }
    }
    if(next_thread == NULL) {
        if(last_thread_pri != -1) {
            next_thread = thread_dequeue_run_q(last_thread_pri);
            if(next_thread == NULL)
                panic("next_thread == NULL! last_thread_pri = %d\n", last_thread_pri);
        } else {
            next_thread = thread_dequeue_run_q(THREAD_IDLE_PRIORITY);
            if(next_thread == NULL)
                panic("next_thread == NULL! no idle priorities!\n");
        }
    }

    next_thread->state = THREAD_STATE_RUNNING;
    next_thread->next_state = THREAD_STATE_READY;

    // XXX calculate quantum
    quantum = 10000;

    timer_cancel_event(&LOCAL_CPU_TIMER);
    timer_setup_timer(&reschedule_event, NULL, &LOCAL_CPU_TIMER);
    timer_set_event(quantum, TIMER_MODE_ONESHOT, &LOCAL_CPU_TIMER);

    if(next_thread != old_thread) {
//        dprintf("thread_resched: cpu %d switching from thread %d to %d\n",
//            smp_get_current_cpu(), old_thread->id, next_thread->id);
        CURR_THREAD = next_thread;
        thread_context_switch(old_thread, next_thread);
    }
}
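/*
 * On the "skip it sometimes" check above: _rand() yields 0..0x7fff, so a
 * thread at the highest occupied priority wins with probability of roughly
 * (0x8000 - 0x3000) / 0x8000, i.e. about 5/8, per reschedule; otherwise
 * the search continues downward and, if nothing is ever picked, the lowest
 * non-empty priority seen (last_thread_pri) is dequeued. This gives
 * lower-priority threads a chance to run even under constant load.
 */
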
static int proc_struct_compare(void *_p, void *_key)
{
    struct proc *p = _p;
    struct proc_key *key = _key;

    if(p->id == key->id) return 0;
    else return 1;
}

static unsigned int proc_struct_hash(void *_p, void *_key, int range)
{
    struct proc *p = _p;
    struct proc_key *key = _key;

    if(p != NULL)
        return (p->id % range);
    else
        return (key->id % range);
}

struct proc *proc_get_kernel_proc()
{
    return kernel_proc;
}

proc_id proc_get_current_proc_id()
{
    return CURR_THREAD->proc->id;
}

static struct proc *create_proc_struct(const char *name, bool kernel)
{
    struct proc *p;

    p = (struct proc *)kmalloc(sizeof(struct proc));
    if(p == NULL)
        goto error;
    p->id = atomic_add(&next_proc_id, 1);
    p->name = (char *)kmalloc(strlen(name)+1);
    if(p->name == NULL)
        goto error1;
    strcpy(p->name, name);
    p->num_threads = 0;
    p->ioctx = NULL;
    p->aspace_id = -1;
    p->aspace = NULL;
    p->kaspace = vm_get_kernel_aspace();
    p->thread_list = NULL;
    p->main_thread = NULL;
    p->state = PROC_STATE_BIRTH;
    p->pending_signals = SIG_NONE;
    p->proc_creation_sem = sem_create(0, "proc_creation_sem");
    if(p->proc_creation_sem < 0)
        goto error2;
    if(arch_proc_init_proc_struct(p, kernel) < 0)
        goto error3;

    return p;

error3:
    sem_delete(p->proc_creation_sem);
error2:
    kfree(p->name);
error1:
    kfree(p);
error:
    return NULL;
}

static int proc_create_proc2(void)
{
    int err;
    struct thread *t;
    struct proc *p;
    char *path;
    addr entry;
    char ustack_name[128];

    t = thread_get_current_thread();
    p = t->proc;

    dprintf("proc_create_proc2: entry thread %d\n", t->id);

    // create an initial primary stack region
    t->user_stack_base = ((USER_STACK_REGION - STACK_SIZE) + USER_STACK_REGION_SIZE);
    sprintf(ustack_name, "%s_primary_stack", p->name);
    t->user_stack_region_id = vm_create_anonymous_region(p->aspace_id, ustack_name, (void **)&t->user_stack_base,
        REGION_ADDR_EXACT_ADDRESS, STACK_SIZE, REGION_WIRING_LAZY, LOCK_RW);
    if(t->user_stack_region_id < 0) {
        panic("proc_create_proc2: could not create default user stack region\n");
        sem_delete_etc(p->proc_creation_sem, -1);
        return t->user_stack_region_id;
    }

    path = p->args;
    dprintf("proc_create_proc2: loading elf binary '%s'\n", path);

    err = elf_load_uspace(path, p, 0, &entry);
    if(err < 0) {
        // XXX clean up proc
        sem_delete_etc(p->proc_creation_sem, -1);
        return err;
    }

    dprintf("proc_create_proc2: loaded elf. entry = 0x%x\n", entry);

    p->state = PROC_STATE_NORMAL;

    // this will wake up the thread that initially created us, with the process id
    // as the return code
    sem_delete_etc(p->proc_creation_sem, p->id);
    p->proc_creation_sem = 0;

    // jump to the entry point in user space
    arch_thread_enter_uspace(entry, t->user_stack_base + STACK_SIZE);

    // never gets here
    return 0;
}

proc_id user_proc_create_proc(const char *upath, const char *uname, int priority)
{
    char path[SYS_MAX_PATH_LEN];
    char name[SYS_MAX_OS_NAME_LEN];
    int rc;

    if((addr)upath >= KERNEL_BASE && (addr)upath <= KERNEL_TOP)
        return ERR_VM_BAD_USER_MEMORY;
    if((addr)uname >= KERNEL_BASE && (addr)uname <= KERNEL_TOP)
        return ERR_VM_BAD_USER_MEMORY;

    rc = user_strncpy(path, upath, SYS_MAX_PATH_LEN-1);
    if(rc < 0)
        return rc;
    path[SYS_MAX_PATH_LEN-1] = 0;

    rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
    if(rc < 0)
        return rc;
    name[SYS_MAX_OS_NAME_LEN-1] = 0;

    return proc_create_proc(path, name, priority);
}

proc_id proc_create_proc(const char *path, const char *name, int priority)
{
    struct proc *p;
    thread_id tid;
    unsigned int state;
    int sem_retcode;

    dprintf("proc_create_proc: entry '%s', name '%s'\n", path, name);

    p = create_proc_struct(name, false);
    if(p == NULL)
        return ERR_NO_MEMORY;

    state = int_disable_interrupts();
    GRAB_PROC_LOCK();
    hash_insert(proc_hash, p);
    RELEASE_PROC_LOCK();
    int_restore_interrupts(state);

    // create a kernel thread, but under the context of the new process
    tid = thread_create_kernel_thread_etc(name, proc_create_proc2, priority, p);
    if(tid < 0) {
        // XXX clean up proc
        return tid;
    }

    // copy the args over
    p->args = kmalloc(strlen(path) + 1);
    if(p->args == NULL) {
        // XXX clean up proc
        return ERR_NO_MEMORY;
    }
    strcpy(p->args, path);

    // create a new ioctx for this process
    p->ioctx = vfs_new_ioctx();
    if(p->ioctx == NULL) {
        // XXX clean up proc
        panic("proc_create_proc: could not create new ioctx\n");
        return ERR_NO_MEMORY;
    }

    // create an address space for this process
    p->aspace_id = vm_create_aspace(p->name, USER_BASE, USER_SIZE, false);
    if(p->aspace_id < 0) {
        // XXX clean up proc
        panic("proc_create_proc: could not create user address space\n");
        return p->aspace_id;
    }
    p->aspace = vm_get_aspace_from_id(p->aspace_id);

    thread_resume_thread(tid);

    // XXX race condition
    // acquire this semaphore, which will exist throughout the creation of the process
    // by the new thread in the new process. At the end of creation, the semaphore will
    // be deleted, with the return code being the process id, or an error.
    sem_acquire_etc(p->proc_creation_sem, 1, 0, 0, &sem_retcode);

    // this will either contain the process id, or an error code
    return sem_retcode;
}
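/*
 * Usage sketch (mirrors test_thread4 above): the caller blocks on
 * proc_creation_sem until the new process's bootstrap thread has loaded
 * the ELF binary in proc_create_proc2(), so the return value is already
 * the final verdict: the new proc_id on success, or a negative error.
 *
 *     proc_id pid = proc_create_proc("/boot/testapp", "testapp", 5);
 *     if(pid < 0)
 *         dprintf("proc_create_proc failed: %d\n", pid);
 */
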
int proc_kill_proc(proc_id id)
{
    int state;
    struct proc *p;
    struct thread *t;
    thread_id tid = -1;
    int retval = 0;

    state = int_disable_interrupts();
    GRAB_PROC_LOCK();

    p = proc_get_proc_struct_locked(id);
    if(p != NULL) {
        tid = p->main_thread->id;
    } else {
        retval = ERR_INVALID_HANDLE;
    }

    RELEASE_PROC_LOCK();
    int_restore_interrupts(state);
    if(retval < 0)
        return retval;

    // just kill the main thread in the process. The cleanup code there will
    // take care of the process
    return thread_kill_thread(tid);
#if 0
    // now suspend all of the threads in this process. It's safe to walk this process's
    // thread list without the lock held because the state of this process is now DEATH
    // so all of the operations that involve changing the thread list are blocked
    // also, it's ok to 'suspend' this thread, if we belong to this process, since we're
    // in the kernel now, we won't be suspended until we leave the kernel. By then,
    // we will have passed the kill signal to this thread.
    for(t = p->thread_list; t; t = t->proc_next) {
        thread_suspend_thread(t->id);
    }

    // XXX cycle through the list of threads again, killing each thread.
    // Note: this won't kill the current thread, if it's one of them, since we're in the
    // kernel.
    // If we actually kill the last thread and not just deliver a signal to it, remove
    // the process along with it, otherwise the last thread that belongs to the process
    // will clean it up when it dies.

    // XXX not finished
#endif
    return retval;
}

// sets the pending signal flag on a thread and possibly does some work to wake it up, etc.
// expects the thread lock to be held
static void deliver_signal(struct thread *t, int signal)
{
    dprintf("deliver_signal: thread 0x%x (%d), signal %d\n", t, t->id, signal);
    switch(signal) {
        case SIG_KILL:
            t->pending_signals |= SIG_KILL;
            switch(t->state) {
                case THREAD_STATE_SUSPENDED:
                    t->state = THREAD_STATE_READY;
                    t->next_state = THREAD_STATE_READY;

                    thread_enqueue_run_q(t);
                    break;
                case THREAD_STATE_WAITING:
                    sem_interrupt_thread(t);
                    break;
                default:
                    ;
            }
            break;
        default:
            t->pending_signals |= signal;
    }
}

// expects the thread lock to be held
static void _check_for_thread_sigs(struct thread *t, int state)
{
    if(t->pending_signals == SIG_NONE)
        return;

    if(t->pending_signals & SIG_KILL) {
        t->pending_signals &= ~SIG_KILL;

        RELEASE_THREAD_LOCK();
        int_restore_interrupts(state);
        thread_exit(0);
        // never gets to here
    }
    if(t->pending_signals & SIG_SUSPEND) {
        t->pending_signals &= ~SIG_SUSPEND;
        t->next_state = THREAD_STATE_SUSPENDED;
        // XXX will probably want to delay this
        thread_resched();
    }
}

// called in the int handler code when a thread enters the kernel for any reason
void thread_atkernel_entry()
{
    int state;
    struct thread *t = CURR_THREAD;

//    dprintf("thread_atkernel_entry: entry thread 0x%x\n", t->id);

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    t->in_kernel = true;

    _check_for_thread_sigs(t, state);

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);
}

// called when a thread exits kernel space to user space
void thread_atkernel_exit()
{
    int state;
    struct thread *t = CURR_THREAD;

//    dprintf("thread_atkernel_exit: entry\n");

    state = int_disable_interrupts();
    GRAB_THREAD_LOCK();

    t->in_kernel = false;

    _check_for_thread_sigs(t, state);

    RELEASE_THREAD_LOCK();
    int_restore_interrupts(state);
}