/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
5 #include <kernel/kernel.h>
6 #include <kernel/debug.h>
7 #include <kernel/console.h>
8 #include <kernel/thread.h>
9 #include <kernel/arch/thread.h>
10 #include <kernel/khash.h>
11 #include <kernel/int.h>
12 #include <kernel/smp.h>
13 #include <kernel/timer.h>
14 #include <kernel/arch/cpu.h>
15 #include <kernel/arch/int.h>
16 #include <kernel/sem.h>
17 #include <kernel/vfs.h>
18 #include <kernel/elf.h>
19 #include <kernel/heap.h>
20 #include <sys/errors.h>
21 #include <boot/stage2.h>
22 #include <libc/string.h>
23 #include <libc/printf.h>
33 static struct proc
*create_proc_struct(const char *name
, bool kernel
);
34 static int proc_struct_compare(void *_p
, void *_key
);
35 static unsigned int proc_struct_hash(void *_p
, void *_key
, int range
);
38 spinlock_t thread_spinlock
= 0;
41 static void *proc_hash
= NULL
;
42 static struct proc
*kernel_proc
= NULL
;
43 static proc_id next_proc_id
= 0;
44 static spinlock_t proc_spinlock
= 0;
45 // NOTE: PROC lock can be held over a THREAD lock acquisition,
46 // but not the other way (to avoid deadlock)
47 #define GRAB_PROC_LOCK() acquire_spinlock(&proc_spinlock)
48 #define RELEASE_PROC_LOCK() release_spinlock(&proc_spinlock)
51 #define LOCAL_CPU_TIMER timers[smp_get_current_cpu()]
52 static struct timer_event
*timers
= NULL
;
55 #define CURR_THREAD cur_thread[smp_get_current_cpu()]
56 static struct thread
**cur_thread
= NULL
;
57 static void *thread_hash
= NULL
;
58 static thread_id next_thread_id
= 0;
60 static sem_id snooze_sem
= -1;
63 // used temporarily as a thread cleans itself up
69 static struct death_stack
*death_stacks
;
70 static unsigned int num_death_stacks
;
71 static unsigned int num_free_death_stacks
;
72 static sem_id death_stack_sem
;
73 static spinlock_t death_stack_spinlock
;
76 static struct thread_queue run_q
[THREAD_NUM_PRIORITY_LEVELS
] = { { NULL
, NULL
}, };
77 static struct thread_queue dead_q
;
80 static void thread_entry(void);
81 static struct thread
*thread_get_thread_struct_locked(thread_id id
);
82 static struct proc
*proc_get_proc_struct(proc_id id
);
83 static struct proc
*proc_get_proc_struct_locked(proc_id id
);
84 static void thread_kthread_exit();
85 static void deliver_signal(struct thread
*t
, int signal
);
87 // insert a thread onto the tail of a queue
88 void thread_enqueue(struct thread
*t
, struct thread_queue
*q
)
100 struct thread
*thread_lookat_queue(struct thread_queue
*q
)
105 struct thread
*thread_dequeue(struct thread_queue
*q
)
118 struct thread
*thread_dequeue_id(struct thread_queue
*q
, thread_id thr_id
)
121 struct thread
*last
= NULL
;
125 if(t
->id
== thr_id
) {
129 last
->q_next
= t
->q_next
;
141 struct thread
*thread_lookat_run_q(int priority
)
143 return thread_lookat_queue(&run_q
[priority
]);
146 void thread_enqueue_run_q(struct thread
*t
)
148 // these shouldn't exist
149 if(t
->priority
> THREAD_MAX_PRIORITY
)
150 t
->priority
= THREAD_MAX_PRIORITY
;
154 thread_enqueue(t
, &run_q
[t
->priority
]);
157 struct thread
*thread_dequeue_run_q(int priority
)
159 return thread_dequeue(&run_q
[priority
]);
162 static void insert_thread_into_proc(struct proc
*p
, struct thread
*t
)
164 t
->proc_next
= p
->thread_list
;
167 if(p
->num_threads
== 1) {
168 // this was the first thread
174 static void remove_thread_from_proc(struct proc
*p
, struct thread
*t
)
176 struct thread
*temp
, *last
= NULL
;
178 for(temp
= p
->thread_list
; temp
!= NULL
; temp
= temp
->proc_next
) {
181 p
->thread_list
= temp
->proc_next
;
183 last
->proc_next
= temp
->proc_next
;
192 static int thread_struct_compare(void *_t
, void *_key
)
194 struct thread
*t
= _t
;
195 struct thread_key
*key
= _key
;
197 if(t
->id
== key
->id
) return 0;
201 static unsigned int thread_struct_hash(void *_t
, void *_key
, int range
)
203 struct thread
*t
= _t
;
204 struct thread_key
*key
= _key
;
207 return (t
->id
% range
);
209 return (key
->id
% range
);
212 static struct thread
*create_thread_struct(const char *name
)
217 state
= int_disable_interrupts();
219 t
= thread_dequeue(&dead_q
);
220 RELEASE_THREAD_LOCK();
221 int_restore_interrupts(state
);
224 t
= (struct thread
*)kmalloc(sizeof(struct thread
));
228 t
->name
= (char *)kmalloc(strlen(name
) + 1);
231 strcpy(t
->name
, name
);
232 t
->id
= atomic_add(&next_thread_id
, 1);
234 t
->sem_blocking
= -1;
235 t
->fault_handler
= 0;
236 t
->kernel_stack_region_id
= -1;
237 t
->kernel_stack_base
= 0;
238 t
->user_stack_region_id
= -1;
239 t
->user_stack_base
= 0;
244 t
->pending_signals
= SIG_NONE
;
249 sprintf(temp
, "thread_0x%x_retcode_sem", t
->id
);
250 t
->return_code_sem
= sem_create(0, temp
);
251 if(t
->return_code_sem
< 0)
255 if(arch_thread_init_thread_struct(t
) < 0)
261 sem_delete_etc(t
->return_code_sem
, -1);
270 static int _create_user_thread_kentry(void)
274 t
= thread_get_current_thread();
276 // a signal may have been delivered here
277 thread_atkernel_exit();
279 // jump to the entry point in user space
280 arch_thread_enter_uspace((addr
)t
->args
, t
->user_stack_base
+ STACK_SIZE
);
286 static thread_id
_create_thread(const char *name
, proc_id pid
, int priority
, addr entry
, bool kernel
)
294 t
= create_thread_struct(name
);
296 return ERR_NO_MEMORY
;
298 t
->priority
= priority
;
299 t
->state
= THREAD_STATE_BIRTH
;
300 t
->next_state
= THREAD_STATE_SUSPENDED
;
302 state
= int_disable_interrupts();
305 // insert into global list
306 hash_insert(thread_hash
, t
);
307 RELEASE_THREAD_LOCK();
310 // look at the proc, make sure it's not being deleted
311 p
= proc_get_proc_struct_locked(pid
);
312 if(p
!= NULL
&& p
->state
!= PROC_STATE_DEATH
) {
313 insert_thread_into_proc(p
, t
);
320 hash_remove(thread_hash
, t
);
321 RELEASE_THREAD_LOCK();
323 int_restore_interrupts(state
);
327 return ERR_TASK_PROC_DELETED
;
330 sprintf(stack_name
, "%s_kstack", name
);
331 t
->kernel_stack_region_id
= vm_create_anonymous_region(vm_get_kernel_aspace_id(), stack_name
,
332 (void **)&t
->kernel_stack_base
, REGION_ADDR_ANY_ADDRESS
, KSTACK_SIZE
,
333 REGION_WIRING_WIRED
, LOCK_RW
|LOCK_KERNEL
);
334 if(t
->kernel_stack_region_id
< 0)
335 panic("_create_thread: error creating kernel stack!\n");
338 // this sets up an initial kthread stack that runs the entry
339 arch_thread_initialize_kthread_stack(t
, (void *)entry
, &thread_entry
, &thread_kthread_exit
);
342 // XXX make this better. For now just keep trying to create a stack
343 // until we find a spot.
344 t
->user_stack_base
= (USER_STACK_REGION
- STACK_SIZE
) + USER_STACK_REGION_SIZE
;
345 while(t
->user_stack_base
> USER_STACK_REGION
) {
346 sprintf(stack_name
, "%s_stack%d", p
->name
, t
->id
);
347 t
->user_stack_region_id
= vm_create_anonymous_region(p
->aspace_id
, stack_name
,
348 (void **)&t
->user_stack_base
,
349 REGION_ADDR_ANY_ADDRESS
, STACK_SIZE
, REGION_WIRING_LAZY
, LOCK_RW
);
350 if(t
->user_stack_region_id
< 0) {
351 t
->user_stack_base
-= STACK_SIZE
;
353 // we created a region
357 if(t
->user_stack_region_id
< 0)
358 panic("_create_thread: unable to create user stack!\n");
360 // copy the user entry over to the args field in the thread struct
361 // the function this will call will immediately switch the thread into
363 t
->args
= (void *)entry
;
364 arch_thread_initialize_kthread_stack(t
, &_create_user_thread_kentry
, &thread_entry
, &thread_kthread_exit
);
367 t
->state
= THREAD_STATE_SUSPENDED
;
372 thread_id
user_thread_create_user_thread(char *uname
, proc_id pid
, int priority
, addr entry
)
374 char name
[SYS_MAX_OS_NAME_LEN
];
377 if((addr
)uname
>= KERNEL_BASE
&& (addr
)uname
<= KERNEL_TOP
)
378 return ERR_VM_BAD_USER_MEMORY
;
379 if(entry
>= KERNEL_BASE
&& entry
<= KERNEL_TOP
)
380 return ERR_VM_BAD_USER_MEMORY
;
382 rc
= user_strncpy(name
, uname
, SYS_MAX_OS_NAME_LEN
-1);
385 name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
387 return thread_create_user_thread(name
, pid
, priority
, entry
);
390 thread_id
thread_create_user_thread(char *name
, proc_id pid
, int priority
, addr entry
)
392 return _create_thread(name
, pid
, priority
, entry
, false);
395 thread_id
thread_create_kernel_thread(const char *name
, int (*func
)(void), int priority
)
397 return _create_thread(name
, proc_get_kernel_proc()->id
, priority
, (addr
)func
, true);
400 static thread_id
thread_create_kernel_thread_etc(const char *name
, int (*func
)(void), int priority
, struct proc
*p
)
402 return _create_thread(name
, p
->id
, priority
, (addr
)func
, true);
405 int thread_suspend_thread(thread_id id
)
410 bool global_resched
= false;
412 state
= int_disable_interrupts();
415 if(CURR_THREAD
->id
== id
) {
418 t
= thread_get_thread_struct_locked(id
);
422 if(t
->proc
== kernel_proc
) {
424 retval
= ERR_NOT_ALLOWED
;
425 } else if(t
->in_kernel
== true) {
426 t
->pending_signals
|= SIG_SUSPEND
;
428 t
->next_state
= THREAD_STATE_SUSPENDED
;
429 global_resched
= true;
433 retval
= ERR_INVALID_HANDLE
;
436 RELEASE_THREAD_LOCK();
437 int_restore_interrupts(state
);
440 smp_send_broadcast_ici(SMP_MSG_RESCHEDULE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_SYNC
);
446 int thread_resume_thread(thread_id id
)
452 state
= int_disable_interrupts();
455 t
= thread_get_thread_struct_locked(id
);
456 if(t
!= NULL
&& t
->state
== THREAD_STATE_SUSPENDED
) {
457 t
->state
= THREAD_STATE_READY
;
458 t
->next_state
= THREAD_STATE_READY
;
460 thread_enqueue_run_q(t
);
463 retval
= ERR_INVALID_HANDLE
;
466 RELEASE_THREAD_LOCK();
467 int_restore_interrupts(state
);
// Kernel-debugger helper: dump every field of a struct proc to the debug
// console via dprintf. Takes a raw pointer and does no validation, so it
// must only be called with an address the debugger user believes is a proc.
static void _dump_proc_info(struct proc *p)
{
	dprintf("PROC: 0x%x\n", p);
	dprintf("id: 0x%x\n", p->id);
	dprintf("name: '%s'\n", p->name);
	dprintf("next: 0x%x\n", p->next);
	dprintf("num_threads: %d\n", p->num_threads);
	dprintf("state: %d\n", p->state);
	dprintf("pending_signals: 0x%x\n", p->pending_signals);
	dprintf("ioctx: 0x%x\n", p->ioctx);
	dprintf("args: 0x%x\n", p->args);
	dprintf("proc_creation_sem: 0x%x\n", p->proc_creation_sem);
	dprintf("aspace_id: 0x%x\n", p->aspace_id);
	dprintf("aspace: 0x%x\n", p->aspace);
	dprintf("kaspace: 0x%x\n", p->kaspace);
	dprintf("main_thread: 0x%x\n", p->main_thread);
	dprintf("thread_list: 0x%x\n", p->thread_list);
}
491 static void dump_proc_info(int argc
, char **argv
)
496 struct hash_iterator i
;
499 dprintf("proc: not enough arguments\n");
503 // if the argument looks like a hex number, treat it as such
504 if(strlen(argv
[1]) > 2 && argv
[1][0] == '0' && argv
[1][1] == 'x') {
505 num
= atoul(argv
[1]);
506 if(num
> vm_get_kernel_aspace()->virtual_map
.base
) {
508 _dump_proc_info((struct proc
*)num
);
515 // walk through the thread list, trying to match name or id
516 hash_open(proc_hash
, &i
);
517 while((p
= hash_next(proc_hash
, &i
)) != NULL
) {
518 if((p
->name
&& strcmp(argv
[1], p
->name
) == 0) || p
->id
== id
) {
523 hash_close(proc_hash
, &i
, false);
527 static const char *state_to_text(int state
)
530 case THREAD_STATE_READY
:
532 case THREAD_STATE_RUNNING
:
534 case THREAD_STATE_WAITING
:
536 case THREAD_STATE_SUSPENDED
:
538 case THREAD_STATE_FREE_ON_RESCHED
:
540 case THREAD_STATE_BIRTH
:
547 static struct thread
*last_thread_dumped
= NULL
;
// Kernel-debugger helper: dump every field of a struct thread to the debug
// console, then remember the pointer in last_thread_dumped so the next_q /
// next_all / next_proc debugger commands can walk the linked lists from it.
static void _dump_thread_info(struct thread *t)
{
	dprintf("THREAD: 0x%x\n", t);
	dprintf("id: 0x%x\n", t->id);
	dprintf("name: '%s'\n", t->name);
	dprintf("all_next: 0x%x\nproc_next: 0x%x\nq_next: 0x%x\n",
		t->all_next, t->proc_next, t->q_next);
	dprintf("priority: 0x%x\n", t->priority);
	dprintf("state: %s\n", state_to_text(t->state));
	dprintf("next_state: %s\n", state_to_text(t->next_state));
	dprintf("pending_signals: 0x%x\n", t->pending_signals);
	dprintf("in_kernel: %d\n", t->in_kernel);
	dprintf("sem_blocking:0x%x\n", t->sem_blocking);
	dprintf("sem_count: 0x%x\n", t->sem_count);
	dprintf("sem_deleted_retcode: 0x%x\n", t->sem_deleted_retcode);
	dprintf("sem_errcode: 0x%x\n", t->sem_errcode);
	dprintf("args: 0x%x\n", t->args);
	dprintf("proc: 0x%x\n", t->proc);
	dprintf("return_code_sem: 0x%x\n", t->return_code_sem);
	dprintf("kernel_stack_region_id: 0x%x\n", t->kernel_stack_region_id);
	dprintf("kernel_stack_base: 0x%x\n", t->kernel_stack_base);
	dprintf("user_stack_region_id: 0x%x\n", t->user_stack_region_id);
	dprintf("user_stack_base: 0x%x\n", t->user_stack_base);
	dprintf("architecture dependant section:\n");
	// architecture-specific register/stack state lives in t->arch_info
	arch_thread_dump_info(&t->arch_info);

	// remember this thread for the next_q/next_all/next_proc debugger commands
	last_thread_dumped = t;
}
578 static void dump_thread_info(int argc
, char **argv
)
583 struct hash_iterator i
;
586 dprintf("thread: not enough arguments\n");
590 // if the argument looks like a hex number, treat it as such
591 if(strlen(argv
[1]) > 2 && argv
[1][0] == '0' && argv
[1][1] == 'x') {
592 num
= atoul(argv
[1]);
593 if(num
> vm_get_kernel_aspace()->virtual_map
.base
) {
595 _dump_thread_info((struct thread
*)num
);
602 // walk through the thread list, trying to match name or id
603 hash_open(thread_hash
, &i
);
604 while((t
= hash_next(thread_hash
, &i
)) != NULL
) {
605 if((t
->name
&& strcmp(argv
[1], t
->name
) == 0) || t
->id
== id
) {
606 _dump_thread_info(t
);
610 hash_close(thread_hash
, &i
, false);
613 static void dump_thread_list(int argc
, char **argv
)
616 struct hash_iterator i
;
618 hash_open(thread_hash
, &i
);
619 while((t
= hash_next(thread_hash
, &i
)) != NULL
) {
622 dprintf("\t%32s", t
->name
);
624 dprintf("\t%32s", "<NULL>");
625 dprintf("\t0x%x", t
->id
);
626 dprintf("\t%16s", state_to_text(t
->state
));
627 dprintf("\t0x%x\n", t
->kernel_stack_base
);
629 hash_close(thread_hash
, &i
, false);
632 static void dump_next_thread_in_q(int argc
, char **argv
)
634 struct thread
*t
= last_thread_dumped
;
637 dprintf("no thread previously dumped. Examine a thread first.\n");
641 dprintf("next thread in queue after thread @ 0x%x\n", t
);
642 if(t
->q_next
!= NULL
) {
643 _dump_thread_info(t
->q_next
);
649 static void dump_next_thread_in_all_list(int argc
, char **argv
)
651 struct thread
*t
= last_thread_dumped
;
654 dprintf("no thread previously dumped. Examine a thread first.\n");
658 dprintf("next thread in global list after thread @ 0x%x\n", t
);
659 if(t
->all_next
!= NULL
) {
660 _dump_thread_info(t
->all_next
);
666 static void dump_next_thread_in_proc(int argc
, char **argv
)
668 struct thread
*t
= last_thread_dumped
;
671 dprintf("no thread previously dumped. Examine a thread first.\n");
675 dprintf("next thread in proc after thread @ 0x%x\n", t
);
676 if(t
->proc_next
!= NULL
) {
677 _dump_thread_info(t
->proc_next
);
683 int get_death_stack(void)
688 sem_acquire(death_stack_sem
, 1);
690 // grab the thread lock around the search for a death stack to make sure it doesn't
691 // find a death stack that has been returned by a thread that still hasn't been
692 // rescheduled for the last time. Localized hack here and put_death_stack_and_reschedule.
693 state
= int_disable_interrupts();
694 acquire_spinlock(&death_stack_spinlock
);
696 release_spinlock(&death_stack_spinlock
);
698 for(i
=0; i
<num_death_stacks
; i
++) {
699 if(death_stacks
[i
].in_use
== false) {
700 death_stacks
[i
].in_use
= true;
705 RELEASE_THREAD_LOCK();
706 int_restore_interrupts(state
);
708 if(i
>= num_death_stacks
) {
709 panic("get_death_stack: couldn't find free stack!\n");
712 dprintf("get_death_stack: returning 0x%x\n", death_stacks
[i
].address
);
717 static void put_death_stack_and_reschedule(unsigned int index
)
719 dprintf("put_death_stack...: passed %d\n", index
);
721 if(index
>= num_death_stacks
|| death_stacks
[index
].in_use
== false)
722 panic("put_death_stack_and_reschedule: passed invalid stack index %d\n", index
);
723 death_stacks
[index
].in_use
= false;
725 // disable the interrupts around the semaphore release to prevent the get_death_stack
726 // function from allocating this stack before the reschedule. Kind of a hack, but localized
727 // not an easy way around it.
728 int_disable_interrupts();
730 acquire_spinlock(&death_stack_spinlock
);
731 sem_release_etc(death_stack_sem
, 1, SEM_FLAG_NO_RESCHED
);
734 release_spinlock(&death_stack_spinlock
);
739 int thread_init(kernel_args
*ka
)
744 dprintf("thread_init: entry\n");
746 // create the process hash table
747 proc_hash
= hash_init(15, (addr
)&kernel_proc
->next
- (addr
)kernel_proc
,
748 &proc_struct_compare
, &proc_struct_hash
);
750 // create the kernel process
751 kernel_proc
= create_proc_struct("kernel_proc", true);
752 if(kernel_proc
== NULL
)
753 panic("could not create kernel proc!\n");
754 kernel_proc
->state
= PROC_STATE_NORMAL
;
756 kernel_proc
->ioctx
= vfs_new_ioctx();
757 if(kernel_proc
->ioctx
== NULL
)
758 panic("could not create ioctx for kernel proc!\n");
760 // stick it in the process hash
761 hash_insert(proc_hash
, kernel_proc
);
763 // create the thread hash table
764 thread_hash
= hash_init(15, (addr
)&t
->all_next
- (addr
)t
,
765 &thread_struct_compare
, &thread_struct_hash
);
767 // zero out the run queues
768 memset(run_q
, 0, sizeof(run_q
));
770 // zero out the dead thread structure q
771 memset(&dead_q
, 0, sizeof(dead_q
));
773 // allocate as many CUR_THREAD slots as there are cpus
774 cur_thread
= (struct thread
**)kmalloc(sizeof(struct thread
*) * smp_get_num_cpus());
775 if(cur_thread
== NULL
) {
776 panic("error allocating cur_thread slots\n");
777 return ERR_NO_MEMORY
;
779 memset(cur_thread
, 0, sizeof(struct thread
*) * smp_get_num_cpus());
781 // allocate a timer structure per cpu
782 timers
= (struct timer_event
*)kmalloc(sizeof(struct timer_event
) * smp_get_num_cpus());
784 panic("error allocating scheduling timers\n");
785 return ERR_NO_MEMORY
;
787 memset(timers
, 0, sizeof(struct timer_event
) * smp_get_num_cpus());
789 // allocate a snooze sem
790 snooze_sem
= sem_create(0, "snooze sem");
792 panic("error creating snooze sem\n");
796 // create an idle thread for each cpu
797 for(i
=0; i
<ka
->num_cpus
; i
++) {
800 sprintf(temp
, "idle_thread%d", i
);
801 t
= create_thread_struct(temp
);
803 panic("error creating idle thread struct\n");
804 return ERR_NO_MEMORY
;
806 t
->proc
= proc_get_kernel_proc();
807 t
->priority
= THREAD_IDLE_PRIORITY
;
808 t
->state
= THREAD_STATE_RUNNING
;
809 t
->next_state
= THREAD_STATE_READY
;
810 sprintf(temp
, "idle_thread%d_kstack", i
);
811 t
->kernel_stack_region_id
= vm_find_region_by_name(vm_get_kernel_aspace_id(), temp
);
812 hash_insert(thread_hash
, t
);
813 insert_thread_into_proc(t
->proc
, t
);
817 // create a set of death stacks
818 num_death_stacks
= smp_get_num_cpus();
819 num_free_death_stacks
= smp_get_num_cpus();
820 death_stacks
= (struct death_stack
*)kmalloc(num_death_stacks
* sizeof(struct death_stack
));
821 if(death_stacks
== NULL
) {
822 panic("error creating death stacks\n");
823 return ERR_NO_MEMORY
;
828 for(i
=0; i
<num_death_stacks
; i
++) {
829 sprintf(temp
, "death_stack%d", i
);
830 death_stacks
[i
].rid
= vm_create_anonymous_region(vm_get_kernel_aspace_id(), temp
,
831 (void **)&death_stacks
[i
].address
,
832 REGION_ADDR_ANY_ADDRESS
, KSTACK_SIZE
, REGION_WIRING_WIRED
, LOCK_RW
|LOCK_KERNEL
);
833 if(death_stacks
[i
].rid
< 0) {
834 panic("error creating death stacks\n");
835 return death_stacks
[i
].rid
;
837 death_stacks
[i
].in_use
= false;
840 death_stack_sem
= sem_create(num_death_stacks
, "death_stack_noavail_sem");
841 death_stack_spinlock
= 0;
843 // set up some debugger commands
844 dbg_add_command(dump_thread_list
, "threads", "list all threads");
845 dbg_add_command(dump_thread_info
, "thread", "list info about a particular thread");
846 dbg_add_command(dump_next_thread_in_q
, "next_q", "dump the next thread in the queue of last thread viewed");
847 dbg_add_command(dump_next_thread_in_all_list
, "next_all", "dump the next thread in the global list of the last thread viewed");
848 dbg_add_command(dump_next_thread_in_proc
, "next_proc", "dump the next thread in the process of the last thread viewed");
849 dbg_add_command(dump_proc_info
, "proc", "list info about a particular process");
854 // this starts the scheduler. Must be run under the context of
855 // the initial idle thread.
856 void thread_start_threading()
860 // XXX may not be the best place for this
861 // invalidate all of the other processors' TLB caches
862 smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_SYNC
);
863 arch_cpu_global_TLB_invalidate();
865 // start the other processors
866 smp_send_broadcast_ici(SMP_MSG_RESCHEDULE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_ASYNC
);
868 state
= int_disable_interrupts();
873 RELEASE_THREAD_LOCK();
874 int_restore_interrupts(state
);
877 void thread_snooze(time_t time
)
879 sem_acquire_etc(snooze_sem
, 1, SEM_FLAG_TIMEOUT
, time
, NULL
);
882 // this function gets run by a new thread before anything else
883 static void thread_entry(void)
885 // simulates the thread spinlock release that would occur if the thread had been
886 // rescheded from. The resched didn't happen because the thread is new.
887 RELEASE_THREAD_LOCK();
888 int_enable_interrupts(); // this essentially simulates a return-from-interrupt
891 // used to pass messages between thread_exit and thread_exit2
892 struct thread_exit_args
{
894 region_id old_kernel_stack
;
896 unsigned int death_stack
;
899 static void thread_exit2(void *_args
)
901 struct thread_exit_args args
;
904 // copy the arguments over, since the source is probably on the kernel stack we're about to delete
905 memcpy(&args
, _args
, sizeof(struct thread_exit_args
));
907 // restore the interrupts
908 int_restore_interrupts(args
.int_state
);
910 dprintf("thread_exit2, running on death stack 0x%x\n", args
.t
->kernel_stack_base
);
912 // delete the old kernel stack region
913 dprintf("thread_exit2: deleting old kernel stack id 0x%x for thread 0x%x\n", args
.old_kernel_stack
, args
.t
->id
);
914 vm_delete_region(vm_get_kernel_aspace_id(), args
.old_kernel_stack
);
916 dprintf("thread_exit2: freeing name for thid 0x%x\n", args
.t
->id
);
924 dprintf("thread_exit2: removing thread 0x%x from global lists\n", args
.t
->id
);
926 // remove this thread from all of the global lists
927 int_disable_interrupts();
929 remove_thread_from_proc(kernel_proc
, args
.t
);
932 hash_remove(thread_hash
, args
.t
);
933 RELEASE_THREAD_LOCK();
935 dprintf("thread_exit2: done removing thread from lists\n");
937 // set the next state to be gone. Will return the thread structure to a ready pool upon reschedule
938 args
.t
->next_state
= THREAD_STATE_FREE_ON_RESCHED
;
940 // return the death stack and reschedule one last time
941 put_death_stack_and_reschedule(args
.death_stack
);
943 panic("thread_exit2: made it where it shouldn't have!\n");
946 void thread_exit(int retcode
)
949 struct thread
*t
= CURR_THREAD
;
950 struct proc
*p
= t
->proc
;
951 bool delete_proc
= false;
952 unsigned int death_stack
;
954 dprintf("thread 0x%x exiting w/return code 0x%x\n", t
->id
, retcode
);
956 // delete the user stack region first
957 if(p
->aspace_id
>= 0 && t
->user_stack_region_id
>= 0) {
958 region_id rid
= t
->user_stack_region_id
;
959 t
->user_stack_region_id
= -1;
960 vm_delete_region(p
->aspace_id
, rid
);
963 if(p
!= kernel_proc
) {
964 // remove this thread from the current process and add it to the kernel
965 // put the thread into the kernel proc until it dies
966 state
= int_disable_interrupts();
968 remove_thread_from_proc(p
, t
);
969 t
->proc
= kernel_proc
;
970 insert_thread_into_proc(kernel_proc
, t
);
971 if(p
->main_thread
== t
) {
972 // this was main thread in this process
974 hash_remove(proc_hash
, p
);
975 p
->state
= PROC_STATE_DEATH
;
979 // reschedule, thus making sure this thread is running in the context of the kernel
981 RELEASE_THREAD_LOCK();
982 int_restore_interrupts(state
);
984 dprintf("thread_exit: thread 0x%x now a kernel thread!\n", t
->id
);
987 // delete the process
989 if(p
->num_threads
> 0) {
990 // there are other threads still in this process,
991 // cycle through and signal kill on each of the threads
992 // XXX this can be optimized. There's got to be a better solution.
993 struct thread
*temp_thread
;
995 state
= int_disable_interrupts();
997 // we can safely walk the list because of the lock. no new threads can be created
998 // because of the PROC_STATE_DEATH flag on the process
999 for(temp_thread
= p
->thread_list
; temp_thread
; temp_thread
= temp_thread
->proc_next
) {
1000 thread_kill_thread_nowait(temp_thread
->id
);
1002 RELEASE_PROC_LOCK();
1003 int_restore_interrupts(state
);
1005 // Now wait for all of the threads to die
1006 // XXX block on a semaphore
1007 while((volatile int)p
->num_threads
> 0) {
1008 thread_snooze(10000); // 10 ms
1011 vm_delete_aspace(p
->aspace_id
);
1012 vfs_free_ioctx(p
->ioctx
);
1016 // delete the sem that others will use to wait on us and get the retcode
1018 sem_id s
= t
->return_code_sem
;
1020 t
->return_code_sem
= -1;
1021 sem_delete_etc(s
, retcode
);
1024 death_stack
= get_death_stack();
1026 struct thread_exit_args args
;
1029 args
.old_kernel_stack
= t
->kernel_stack_region_id
;
1030 args
.death_stack
= death_stack
;
1032 // disable the interrupts. Must remain disabled until the kernel stack pointer can be officially switched
1033 args
.int_state
= int_disable_interrupts();
1035 // set the new kernel stack officially to the death stack, wont be really switched until
1036 // the next function is called. This bookkeeping must be done now before a context switch
1037 // happens, or the processor will interrupt to the old stack
1038 t
->kernel_stack_region_id
= death_stacks
[death_stack
].rid
;
1039 t
->kernel_stack_base
= death_stacks
[death_stack
].address
;
1041 // we will continue in thread_exit2(), on the new stack
1042 arch_thread_switch_kstack_and_call(t
, t
->kernel_stack_base
+ KSTACK_SIZE
, thread_exit2
, &args
);
1045 panic("never can get here\n");
1048 static int _thread_kill_thread(thread_id id
, bool wait_on
)
1054 dprintf("_thread_kill_thread: id %d, wait_on %d\n", id
, wait_on
);
1056 state
= int_disable_interrupts();
1059 t
= thread_get_thread_struct_locked(id
);
1061 if(t
->proc
== kernel_proc
) {
1063 rc
= ERR_NOT_ALLOWED
;
1065 deliver_signal(t
, SIG_KILL
);
1067 if(t
->id
== CURR_THREAD
->id
)
1068 wait_on
= false; // can't wait on ourself
1071 rc
= ERR_INVALID_HANDLE
;
1074 RELEASE_THREAD_LOCK();
1075 int_restore_interrupts(state
);
1080 thread_wait_on_thread(id
, NULL
);
1085 int thread_kill_thread(thread_id id
)
1087 return _thread_kill_thread(id
, true);
1090 int thread_kill_thread_nowait(thread_id id
)
1092 return _thread_kill_thread(id
, false);
1095 static void thread_kthread_exit()
1100 int user_thread_wait_on_thread(thread_id id
, int *uretcode
)
1105 if((addr
)uretcode
>= KERNEL_BASE
&& (addr
)uretcode
<= KERNEL_TOP
)
1106 return ERR_VM_BAD_USER_MEMORY
;
1108 rc
= thread_wait_on_thread(id
, &retcode
);
1112 rc2
= user_memcpy(uretcode
, &retcode
, sizeof(retcode
));
1119 int thread_wait_on_thread(thread_id id
, int *retcode
)
1125 state
= int_disable_interrupts();
1128 t
= thread_get_thread_struct_locked(id
);
1130 sem
= t
->return_code_sem
;
1132 sem
= ERR_INVALID_HANDLE
;
1135 RELEASE_THREAD_LOCK();
1136 int_restore_interrupts(state
);
1138 return sem_acquire_etc(sem
, 1, 0, 0, retcode
);
1141 int user_proc_wait_on_proc(proc_id id
, int *uretcode
)
1146 if((addr
)uretcode
>= KERNEL_BASE
&& (addr
)uretcode
<= KERNEL_TOP
)
1147 return ERR_VM_BAD_USER_MEMORY
;
1149 rc
= proc_wait_on_proc(id
, &retcode
);
1153 rc2
= user_memcpy(uretcode
, &retcode
, sizeof(retcode
));
1160 int proc_wait_on_proc(proc_id id
, int *retcode
)
1166 state
= int_disable_interrupts();
1168 p
= proc_get_proc_struct_locked(id
);
1169 if(p
&& p
->main_thread
) {
1170 tid
= p
->main_thread
->id
;
1172 tid
= ERR_INVALID_HANDLE
;
1174 RELEASE_PROC_LOCK();
1175 int_restore_interrupts(state
);
1177 return thread_wait_on_thread(tid
, retcode
);
1180 thread_id
thread_get_current_thread_id()
1182 if(cur_thread
== NULL
)
1184 return CURR_THREAD
->id
;
1187 struct thread
*thread_get_current_thread()
1189 if(cur_thread
== NULL
)
1194 struct thread
*thread_get_thread_struct(thread_id id
)
1199 state
= int_disable_interrupts();
1202 t
= thread_get_thread_struct_locked(id
);
1204 RELEASE_THREAD_LOCK();
1205 int_restore_interrupts(state
);
1210 static struct thread
*thread_get_thread_struct_locked(thread_id id
)
1212 struct thread_key key
;
1216 return hash_lookup(thread_hash
, &key
);
1219 static struct proc
*proc_get_proc_struct(proc_id id
)
1224 state
= int_disable_interrupts();
1227 p
= proc_get_proc_struct_locked(id
);
1229 RELEASE_PROC_LOCK();
1230 int_restore_interrupts(state
);
1235 static struct proc
*proc_get_proc_struct_locked(proc_id id
)
1237 struct proc_key key
;
1241 return hash_lookup(proc_hash
, &key
);
// Hand the CPU from t_from to t_to by delegating to the architecture-
// specific context-switch routine.
static void thread_context_switch(struct thread *t_from, struct thread *t_to)
{
	arch_thread_context_switch(t_from, t_to);
}
1249 #define NUM_TEST_THREADS 16
1250 /* thread TEST code */
1251 static sem_id thread_test_sems
[NUM_TEST_THREADS
];
1252 static thread_id thread_test_first_thid
;
1254 int test_thread_starter_thread()
1256 thread_snooze(1000000); // wait a second
1258 // start the chain of threads by releasing one of them
1259 sem_release(thread_test_sems
[0], 1);
1268 fd
= sys_open("/bus/pci", STREAM_TYPE_DEVICE
, 0);
1270 dprintf("test_thread5: error opening /bus/pci\n");
1274 sys_ioctl(fd
, 99, NULL
, 0);
1283 pid
= proc_create_proc("/boot/testapp", "testapp", 5);
1287 dprintf("test_thread4: finished created new process\n");
1289 thread_snooze(1000000);
1292 // proc_kill_proc(pid);
1303 kprintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
1304 fd
= sys_open("/boot/testfile", STREAM_TYPE_FILE
, 0);
1306 panic("could not open /boot/testfile\n");
1307 len
= sys_read(fd
, buf
, 0, sizeof(buf
));
1308 sys_write(0, buf
, 0, len
);
1320 len
= sys_read(0, str
, 0, sizeof(str
) - 1);
1322 dprintf("error reading from console!\n");
1325 if(len
> 1) dprintf("test_thread2: read %d bytes\n", len
);
1335 int tid
= thread_get_current_thread_id();
1340 y
= (tid
/ 80) * 2 ;
1346 kprintf_xy(0, tid
-1, "thread%d - %d - 0x%x 0x%x - cpu %d", tid
, a
, system_time(), smp_get_current_cpu());
1349 dprintf("thread%d - %d - %d %d - cpu %d\n", tid
, a
, system_time(), smp_get_current_cpu());
1352 kprintf("thread%d - %d - %d %d - cpu %d\n", tid
, a
, system_time(), smp_get_current_cpu());
1355 kprintf_xy(x
, y
, "%c", c
++);
1358 kprintf_xy(x
, y
+1, "%d", smp_get_current_cpu());
1361 thread_snooze(10000 * tid
);
1364 sem_acquire(thread_test_sems
[tid
- thread_test_first_thid
], 1);
1366 // release the next semaphore
1368 sem_id sem_to_release
;
1370 sem_to_release
= tid
- thread_test_first_thid
+ 1;
1371 if(sem_to_release
>= NUM_TEST_THREADS
)
1373 sem_to_release
= thread_test_sems
[sem_to_release
];
1374 sem_release(sem_to_release
, 1);
1378 switch(tid
- thread_test_first_thid
) {
1381 sem_release(thread_test_sem
, _rand() % 16 + 1);
1384 sem_acquire(thread_test_sem
, 1);
1389 kprintf("thread %d exiting\n", tid
);
1399 dprintf("panic thread starting\n");
1401 thread_snooze(10000000);
1413 for(i
=0; i
<NUM_TEST_THREADS
; i
++) {
1414 sprintf(temp
, "test_thread%d", i
);
1415 tid
= thread_create_kernel_thread(temp
, &test_thread
, 5);
1416 thread_resume_thread(tid
);
1418 thread_test_first_thid
= tid
;
1420 sprintf(temp
, "test sem %d", i
);
1421 thread_test_sems
[i
] = sem_create(0, temp
);
1423 tid
= thread_create_kernel_thread("test starter thread", &test_thread_starter_thread
, THREAD_MAX_PRIORITY
);
1424 thread_resume_thread(tid
);
1427 tid
= thread_create_kernel_thread("test thread 2", &test_thread2
, 5);
1428 thread_resume_thread(tid
);
1431 tid
= thread_create_kernel_thread("test thread 3", &test_thread3
, 5);
1432 thread_resume_thread(tid
);
1435 tid
= thread_create_kernel_thread("test thread 4", &test_thread4
, 5);
1436 thread_resume_thread(tid
);
1439 tid
= thread_create_kernel_thread("test thread 5", &test_thread5
, 5);
1440 thread_resume_thread(tid
);
1443 tid
= thread_create_kernel_thread("panic thread", &panic_thread
, THREAD_MAX_PRIORITY
);
1444 thread_resume_thread(tid
);
1446 dprintf("thread_test: done creating test threads\n");
// Minimal pseudo-random generator (classic K&R-style LCG), lazily seeded
// from the system clock on first use. Returns a value in [0, 0x7FFF].
// NOTE(review): the signed multiply can overflow (technically UB); kept as-is
// since the original relies on wraparound for its sequence.
static int _rand()
{
	static int next = 0;

	if(next == 0)
		next = system_time();	// lazy seed; avoids needing init ordering

	next = next * 1103515245 + 12345;
	return (next >> 16) & 0x7FFF;
}
1462 static int reschedule_event(void *unused
)
1464 // this function is called as a result of the timer event set by the scheduler
1465 // returning this causes a reschedule on the timer event
1466 return INT_RESCHEDULE
;
1469 // NOTE: expects thread_spinlock to be held
1470 void thread_resched()
1472 struct thread
*next_thread
= NULL
;
1473 int last_thread_pri
= -1;
1474 struct thread
*old_thread
= CURR_THREAD
;
1478 // dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), CURR_THREAD);
1480 switch(old_thread
->next_state
) {
1481 case THREAD_STATE_RUNNING
:
1482 case THREAD_STATE_READY
:
1483 // dprintf("enqueueing thread 0x%x into run q. pri = %d\n", old_thread, old_thread->priority);
1484 thread_enqueue_run_q(old_thread
);
1486 case THREAD_STATE_SUSPENDED
:
1487 dprintf("suspending thread 0x%x\n", old_thread
->id
);
1489 case THREAD_STATE_FREE_ON_RESCHED
:
1490 thread_enqueue(old_thread
, &dead_q
);
1493 // dprintf("not enqueueing thread 0x%x into run q. next_state = %d\n", old_thread, old_thread->next_state);
1496 old_thread
->state
= old_thread
->next_state
;
1498 for(i
= THREAD_MAX_PRIORITY
; i
> THREAD_IDLE_PRIORITY
; i
--) {
1499 next_thread
= thread_lookat_run_q(i
);
1500 if(next_thread
!= NULL
) {
1501 // skip it sometimes
1502 if(_rand() > 0x3000) {
1503 next_thread
= thread_dequeue_run_q(i
);
1506 last_thread_pri
= i
;
1510 if(next_thread
== NULL
) {
1511 if(last_thread_pri
!= -1) {
1512 next_thread
= thread_dequeue_run_q(last_thread_pri
);
1513 if(next_thread
== NULL
)
1514 panic("next_thread == NULL! last_thread_pri = %d\n", last_thread_pri
);
1516 next_thread
= thread_dequeue_run_q(THREAD_IDLE_PRIORITY
);
1517 if(next_thread
== NULL
)
1518 panic("next_thread == NULL! no idle priorities!\n", last_thread_pri
);
1522 next_thread
->state
= THREAD_STATE_RUNNING
;
1523 next_thread
->next_state
= THREAD_STATE_READY
;
1525 // XXX calculate quantum
1528 timer_cancel_event(&LOCAL_CPU_TIMER
);
1529 timer_setup_timer(&reschedule_event
, NULL
, &LOCAL_CPU_TIMER
);
1530 timer_set_event(quantum
, TIMER_MODE_ONESHOT
, &LOCAL_CPU_TIMER
);
1532 if(next_thread
!= old_thread
) {
1533 // dprintf("thread_resched: cpu %d switching from thread %d to %d\n",
1534 // smp_get_current_cpu(), old_thread->id, next_thread->id);
1535 CURR_THREAD
= next_thread
;
1536 thread_context_switch(old_thread
, next_thread
);
1540 static int proc_struct_compare(void *_p
, void *_key
)
1542 struct proc
*p
= _p
;
1543 struct proc_key
*key
= _key
;
1545 if(p
->id
== key
->id
) return 0;
1549 static unsigned int proc_struct_hash(void *_p
, void *_key
, int range
)
1551 struct proc
*p
= _p
;
1552 struct proc_key
*key
= _key
;
1555 return (p
->id
% range
);
1557 return (key
->id
% range
);
1560 struct proc
*proc_get_kernel_proc()
1565 proc_id
proc_get_current_proc_id()
1567 return CURR_THREAD
->proc
->id
;
1570 static struct proc
*create_proc_struct(const char *name
, bool kernel
)
1574 p
= (struct proc
*)kmalloc(sizeof(struct proc
));
1577 p
->id
= atomic_add(&next_proc_id
, 1);
1578 p
->name
= (char *)kmalloc(strlen(name
)+1);
1581 strcpy(p
->name
, name
);
1586 p
->kaspace
= vm_get_kernel_aspace();
1587 p
->thread_list
= NULL
;
1588 p
->main_thread
= NULL
;
1589 p
->state
= PROC_STATE_BIRTH
;
1590 p
->pending_signals
= SIG_NONE
;
1591 p
->proc_creation_sem
= sem_create(0, "proc_creation_sem");
1592 if(p
->proc_creation_sem
< 0)
1594 if(arch_proc_init_proc_struct(p
, kernel
) < 0)
1600 sem_delete(p
->proc_creation_sem
);
1609 static int proc_create_proc2(void)
1616 char ustack_name
[128];
1618 t
= thread_get_current_thread();
1621 dprintf("proc_create_proc2: entry thread %d\n", t
->id
);
1623 // create an initial primary stack region
1624 t
->user_stack_base
= ((USER_STACK_REGION
- STACK_SIZE
) + USER_STACK_REGION_SIZE
);
1625 sprintf(ustack_name
, "%s_primary_stack", p
->name
);
1626 t
->user_stack_region_id
= vm_create_anonymous_region(p
->aspace_id
, ustack_name
, (void **)&t
->user_stack_base
,
1627 REGION_ADDR_EXACT_ADDRESS
, STACK_SIZE
, REGION_WIRING_LAZY
, LOCK_RW
);
1628 if(t
->user_stack_region_id
< 0) {
1629 panic("proc_create_proc2: could not create default user stack region\n");
1630 sem_delete_etc(p
->proc_creation_sem
, -1);
1631 return t
->user_stack_region_id
;
1635 dprintf("proc_create_proc2: loading elf binary '%s'\n", path
);
1637 err
= elf_load_uspace(path
, p
, 0, &entry
);
1639 // XXX clean up proc
1640 sem_delete_etc(p
->proc_creation_sem
, -1);
1644 dprintf("proc_create_proc2: loaded elf. entry = 0x%x\n", entry
);
1646 p
->state
= PROC_STATE_NORMAL
;
1648 // this will wake up the thread that initially created us, with the process id
1649 // as the return code
1650 sem_delete_etc(p
->proc_creation_sem
, p
->id
);
1651 p
->proc_creation_sem
= 0;
1653 // jump to the entry point in user space
1654 arch_thread_enter_uspace(entry
, t
->user_stack_base
+ STACK_SIZE
);
1660 proc_id
user_proc_create_proc(const char *upath
, const char *uname
, int priority
)
1662 char path
[SYS_MAX_PATH_LEN
];
1663 char name
[SYS_MAX_OS_NAME_LEN
];
1666 if((addr
)upath
>= KERNEL_BASE
&& (addr
)upath
<= KERNEL_TOP
)
1667 return ERR_VM_BAD_USER_MEMORY
;
1668 if((addr
)uname
>= KERNEL_BASE
&& (addr
)uname
<= KERNEL_TOP
)
1669 return ERR_VM_BAD_USER_MEMORY
;
1671 rc
= user_strncpy(path
, upath
, SYS_MAX_PATH_LEN
-1);
1674 path
[SYS_MAX_PATH_LEN
-1] = 0;
1676 rc
= user_strncpy(name
, uname
, SYS_MAX_OS_NAME_LEN
-1);
1679 name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
1681 return proc_create_proc(path
, name
, priority
);
1684 proc_id
proc_create_proc(const char *path
, const char *name
, int priority
)
1692 dprintf("proc_create_proc: entry '%s', name '%s'\n", path
, name
);
1694 p
= create_proc_struct(name
, false);
1696 return ERR_NO_MEMORY
;
1698 state
= int_disable_interrupts();
1700 hash_insert(proc_hash
, p
);
1701 RELEASE_PROC_LOCK();
1702 int_restore_interrupts(state
);
1704 // create a kernel thread, but under the context of the new process
1705 tid
= thread_create_kernel_thread_etc(name
, proc_create_proc2
, priority
, p
);
1707 // XXX clean up proc
1711 // copy the args over
1712 p
->args
= kmalloc(strlen(path
) + 1);
1713 if(p
->args
== NULL
) {
1714 // XXX clean up proc
1715 return ERR_NO_MEMORY
;
1717 strcpy(p
->args
, path
);
1719 // create a new ioctx for this process
1720 p
->ioctx
= vfs_new_ioctx();
1721 if(p
->ioctx
== NULL
) {
1722 // XXX clean up proc
1723 panic("proc_create_proc: could not create new ioctx\n");
1724 return ERR_NO_MEMORY
;
1727 // create an address space for this process
1728 p
->aspace_id
= vm_create_aspace(p
->name
, USER_BASE
, USER_SIZE
, false);
1729 if(p
->aspace_id
< 0) {
1730 // XXX clean up proc
1731 panic("proc_create_proc: could not create user address space\n");
1732 return p
->aspace_id
;
1734 p
->aspace
= vm_get_aspace_from_id(p
->aspace_id
);
1736 thread_resume_thread(tid
);
1738 // XXX race condition
1739 // acquire this semaphore, which will exist throughout the creation of the process
1740 // by the new thread in the new process. At the end of creation, the semaphore will
1741 // be deleted, with the return code being the process id, or an error.
1742 sem_acquire_etc(p
->proc_creation_sem
, 1, 0, 0, &sem_retcode
);
1744 // this will either contain the process id, or an error code
1748 int proc_kill_proc(proc_id id
)
1756 state
= int_disable_interrupts();
1759 p
= proc_get_proc_struct_locked(id
);
1761 tid
= p
->main_thread
->id
;
1763 retval
= ERR_INVALID_HANDLE
;
1766 RELEASE_PROC_LOCK();
1767 int_restore_interrupts(state
);
1771 // just kill the main thread in the process. The cleanup code there will
1772 // take care of the process
1773 return thread_kill_thread(tid
);
1775 // now suspend all of the threads in this process. It's safe to walk this process's
1776 // thread list without the lock held because the state of this process is now DEATH
1777 // so all of the operations that involve changing the thread list are blocked
1778 // also, it's ok to 'suspend' this thread, if we belong to this process, since we're
1779 // in the kernel now, we won't be suspended until we leave the kernel. By then,
1780 // we will have passed the kill signal to this thread.
1781 for(t
= p
->thread_list
; t
; t
= t
->proc_next
) {
1782 thread_suspend_thread(t
->id
);
1785 // XXX cycle through the list of threads again, killing each thread.
1786 // Note: this wont kill the current thread, if it's one of them, since we're in the
1788 // If we actually kill the last thread and not just deliver a signal to it, remove
1789 // the process along with it, otherwise the last thread that belongs to the process
1790 // will clean it up when it dies.
1797 // sets the pending signal flag on a thread and possibly does some work to wake it up, etc.
1798 // expects the thread lock to be held
1799 static void deliver_signal(struct thread
*t
, int signal
)
1801 dprintf("deliver_signal: thread 0x%x (%d), signal %d\n", t
, t
->id
, signal
);
1804 t
->pending_signals
|= SIG_KILL
;
1806 case THREAD_STATE_SUSPENDED
:
1807 t
->state
= THREAD_STATE_READY
;
1808 t
->next_state
= THREAD_STATE_READY
;
1810 thread_enqueue_run_q(t
);
1812 case THREAD_STATE_WAITING
:
1813 sem_interrupt_thread(t
);
1820 t
->pending_signals
|= signal
;
1824 // expects the thread lock to be held
1825 static void _check_for_thread_sigs(struct thread
*t
, int state
)
1827 if(t
->pending_signals
== SIG_NONE
)
1830 if(t
->pending_signals
& SIG_KILL
) {
1831 t
->pending_signals
&= ~SIG_KILL
;
1833 RELEASE_THREAD_LOCK();
1834 int_restore_interrupts(state
);
1836 // never gets to here
1838 if(t
->pending_signals
& SIG_SUSPEND
) {
1839 t
->pending_signals
&= ~SIG_SUSPEND
;
1840 t
->next_state
= THREAD_STATE_SUSPENDED
;
1841 // XXX will probably want to delay this
1846 // called in the int handler code when a thread enters the kernel for any reason
1847 void thread_atkernel_entry()
1850 struct thread
*t
= CURR_THREAD
;
1852 // dprintf("thread_atkernel_entry: entry thread 0x%x\n", t->id);
1854 state
= int_disable_interrupts();
1857 t
->in_kernel
= true;
1859 _check_for_thread_sigs(t
, state
);
1861 RELEASE_THREAD_LOCK();
1862 int_restore_interrupts(state
);
1865 // called when a thread exits kernel space to user space
1866 void thread_atkernel_exit()
1869 struct thread
*t
= CURR_THREAD
;
1871 // dprintf("thread_atkernel_exit: entry\n");
1873 state
= int_disable_interrupts();
1876 t
->in_kernel
= false;
1878 _check_for_thread_sigs(t
, state
);
1880 RELEASE_THREAD_LOCK();
1881 int_restore_interrupts(state
);