2 ** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
3 ** Distributed under the terms of the NewOS License.
5 #include <kernel/kernel.h>
6 #include <kernel/debug.h>
7 #include <kernel/console.h>
8 #include <kernel/thread.h>
9 #include <kernel/arch/thread.h>
10 #include <kernel/khash.h>
11 #include <kernel/int.h>
12 #include <kernel/smp.h>
13 #include <kernel/timer.h>
14 #include <kernel/cpu.h>
15 #include <kernel/arch/cpu.h>
16 #include <kernel/arch/int.h>
17 #include <kernel/arch/vm.h>
18 #include <kernel/sem.h>
19 #include <kernel/port.h>
20 #include <kernel/vfs.h>
21 #include <kernel/elf.h>
22 #include <kernel/heap.h>
23 #include <newos/user_runtime.h>
24 #include <newos/errors.h>
25 #include <boot/stage2.h>
29 #include <sys/resource.h>
45 static struct proc
*create_proc_struct(const char *name
, bool kernel
);
46 static int proc_struct_compare(void *_p
, const void *_key
);
47 static unsigned int proc_struct_hash(void *_p
, const void *_key
, unsigned int range
);
50 spinlock_t thread_spinlock
= 0;
53 static void *proc_hash
= NULL
;
54 static struct proc
*kernel_proc
= NULL
;
55 static proc_id next_proc_id
= 1;
56 static spinlock_t proc_spinlock
= 0;
57 // NOTE: PROC lock can be held over a THREAD lock acquisition,
58 // but not the other way (to avoid deadlock)
59 #define GRAB_PROC_LOCK() acquire_spinlock(&proc_spinlock)
60 #define RELEASE_PROC_LOCK() release_spinlock(&proc_spinlock)
63 static struct thread
*idle_threads
[MAX_BOOT_CPUS
];
64 static void *thread_hash
= NULL
;
65 static thread_id next_thread_id
= 1;
67 static sem_id snooze_sem
= -1;
70 // used temporarily as a thread cleans itself up
76 static struct death_stack
*death_stacks
;
77 static unsigned int num_death_stacks
;
78 static unsigned int volatile death_stack_bitmap
;
79 static sem_id death_stack_sem
;
82 static struct thread_queue run_q
[THREAD_NUM_PRIORITY_LEVELS
] = { { NULL
, NULL
}, };
83 static struct thread_queue dead_q
;
85 static int _rand(void);
86 static void thread_entry(void);
87 static struct thread
*thread_get_thread_struct_locked(thread_id id
);
88 static struct proc
*proc_get_proc_struct(proc_id id
);
89 static struct proc
*proc_get_proc_struct_locked(proc_id id
);
90 static void thread_kthread_exit(void);
91 static void deliver_signal(struct thread
*t
, int signal
);
93 // insert a thread onto the tail of a queue
94 void thread_enqueue(struct thread
*t
, struct thread_queue
*q
)
106 struct thread
*thread_lookat_queue(struct thread_queue
*q
)
111 struct thread
*thread_dequeue(struct thread_queue
*q
)
124 struct thread
*thread_dequeue_id(struct thread_queue
*q
, thread_id thr_id
)
127 struct thread
*last
= NULL
;
131 if(t
->id
== thr_id
) {
135 last
->q_next
= t
->q_next
;
147 struct thread
*thread_lookat_run_q(int priority
)
149 return thread_lookat_queue(&run_q
[priority
]);
152 void thread_enqueue_run_q(struct thread
*t
)
154 // these shouldn't exist
155 if(t
->priority
> THREAD_MAX_PRIORITY
)
156 t
->priority
= THREAD_MAX_PRIORITY
;
160 thread_enqueue(t
, &run_q
[t
->priority
]);
163 struct thread
*thread_dequeue_run_q(int priority
)
165 return thread_dequeue(&run_q
[priority
]);
168 static void insert_thread_into_proc(struct proc
*p
, struct thread
*t
)
170 t
->proc_next
= p
->thread_list
;
173 if(p
->num_threads
== 1) {
174 // this was the first thread
180 static void remove_thread_from_proc(struct proc
*p
, struct thread
*t
)
182 struct thread
*temp
, *last
= NULL
;
184 for(temp
= p
->thread_list
; temp
!= NULL
; temp
= temp
->proc_next
) {
187 p
->thread_list
= temp
->proc_next
;
189 last
->proc_next
= temp
->proc_next
;
198 static int thread_struct_compare(void *_t
, const void *_key
)
200 struct thread
*t
= _t
;
201 const struct thread_key
*key
= _key
;
203 if(t
->id
== key
->id
) return 0;
207 // Frees the argument list
209 // args argument list.
210 // args number of arguments
212 static void free_arg_list(char **args
, int argc
)
217 for(cnt
= 0; cnt
< argc
; cnt
++){
225 // Copy argument list from userspace to kernel space
227 // args userspace parameters
228 // argc number of parameters
229 // kargs usespace parameters
230 // return < 0 on error and **kargs = NULL
232 static int user_copy_arg_list(char **args
, int argc
, char ***kargs
)
238 char buf
[SYS_THREAD_ARG_LENGTH_MAX
];
242 if((addr
)args
>= KERNEL_BASE
&& (addr
)args
<= KERNEL_TOP
)
243 return ERR_VM_BAD_USER_MEMORY
;
245 largs
= kmalloc((argc
+ 1) * sizeof(char *));
247 return ERR_NO_MEMORY
;
250 // scan all parameters and copy to kernel space
252 for(cnt
= 0; cnt
< argc
; cnt
++) {
253 err
= user_memcpy(&source
, &(args
[cnt
]), sizeof(char *));
257 if((addr
)source
>= KERNEL_BASE
&& (addr
)source
<= KERNEL_TOP
){
258 err
= ERR_VM_BAD_USER_MEMORY
;
262 err
= user_strncpy(buf
,source
, SYS_THREAD_ARG_LENGTH_MAX
- 1);
265 buf
[SYS_THREAD_ARG_LENGTH_MAX
- 1] = 0;
267 largs
[cnt
] = kstrdup(buf
);
268 if(largs
[cnt
] == NULL
){
280 free_arg_list(largs
,cnt
);
281 dprintf("user_copy_arg_list failed %d \n",err
);
285 static unsigned int thread_struct_hash(void *_t
, const void *_key
, unsigned int range
)
287 struct thread
*t
= _t
;
288 const struct thread_key
*key
= _key
;
291 return (t
->id
% range
);
293 return (key
->id
% range
);
296 static struct thread
*create_thread_struct(const char *name
)
301 state
= int_disable_interrupts();
303 t
= thread_dequeue(&dead_q
);
304 RELEASE_THREAD_LOCK();
305 int_restore_interrupts(state
);
308 t
= (struct thread
*)kmalloc(sizeof(struct thread
));
313 strncpy(&t
->name
[0], name
, SYS_MAX_OS_NAME_LEN
-1);
314 t
->name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
316 t
->id
= atomic_add(&next_thread_id
, 1);
319 t
->sem_blocking
= -1;
320 t
->fault_handler
= 0;
321 t
->kernel_stack_region_id
= -1;
322 t
->kernel_stack_base
= 0;
323 t
->user_stack_region_id
= -1;
324 t
->user_stack_base
= 0;
329 t
->pending_signals
= SIG_NONE
;
337 sprintf(temp
, "thread_0x%x_retcode_sem", t
->id
);
338 t
->return_code_sem
= sem_create(0, temp
);
339 if(t
->return_code_sem
< 0)
343 if(arch_thread_init_thread_struct(t
) < 0)
349 sem_delete_etc(t
->return_code_sem
, -1);
356 static void delete_thread_struct(struct thread
*t
)
358 if(t
->return_code_sem
>= 0)
359 sem_delete_etc(t
->return_code_sem
, -1);
363 static int _create_user_thread_kentry(void)
367 t
= thread_get_current_thread();
369 // a signal may have been delivered here
370 thread_atkernel_exit();
372 // jump to the entry point in user space
373 arch_thread_enter_uspace((addr
)t
->entry
, t
->args
, t
->user_stack_base
+ STACK_SIZE
);
379 static int _create_kernel_thread_kentry(void)
381 int (*func
)(void *args
);
384 t
= thread_get_current_thread();
386 // call the entry function with the appropriate args
387 func
= (void *)t
->entry
;
389 return func(t
->args
);
392 static thread_id
_create_thread(const char *name
, proc_id pid
, addr entry
, void *args
, bool kernel
)
400 t
= create_thread_struct(name
);
402 return ERR_NO_MEMORY
;
404 t
->priority
= THREAD_MEDIUM_PRIORITY
;
405 t
->state
= THREAD_STATE_BIRTH
;
406 t
->next_state
= THREAD_STATE_SUSPENDED
;
408 state
= int_disable_interrupts();
411 // insert into global list
412 hash_insert(thread_hash
, t
);
413 RELEASE_THREAD_LOCK();
416 // look at the proc, make sure it's not being deleted
417 p
= proc_get_proc_struct_locked(pid
);
418 if(p
!= NULL
&& p
->state
!= PROC_STATE_DEATH
) {
419 insert_thread_into_proc(p
, t
);
426 hash_remove(thread_hash
, t
);
427 RELEASE_THREAD_LOCK();
429 int_restore_interrupts(state
);
431 delete_thread_struct(t
);
432 return ERR_TASK_PROC_DELETED
;
435 sprintf(stack_name
, "%s_kstack", name
);
436 t
->kernel_stack_region_id
= vm_create_anonymous_region(vm_get_kernel_aspace_id(), stack_name
,
437 (void **)&t
->kernel_stack_base
, REGION_ADDR_ANY_ADDRESS
, KSTACK_SIZE
,
438 REGION_WIRING_WIRED
, LOCK_RW
|LOCK_KERNEL
);
439 if(t
->kernel_stack_region_id
< 0)
440 panic("_create_thread: error creating kernel stack!\n");
446 // this sets up an initial kthread stack that runs the entry
447 arch_thread_initialize_kthread_stack(t
, &_create_kernel_thread_kentry
, &thread_entry
, &thread_kthread_exit
);
450 // XXX make this better. For now just keep trying to create a stack
451 // until we find a spot.
452 t
->user_stack_base
= (USER_STACK_REGION
- STACK_SIZE
) + USER_STACK_REGION_SIZE
;
453 while(t
->user_stack_base
> USER_STACK_REGION
) {
454 sprintf(stack_name
, "%s_stack%d", p
->name
, t
->id
);
455 t
->user_stack_region_id
= vm_create_anonymous_region(p
->aspace_id
, stack_name
,
456 (void **)&t
->user_stack_base
,
457 REGION_ADDR_ANY_ADDRESS
, STACK_SIZE
, REGION_WIRING_LAZY
, LOCK_RW
);
458 if(t
->user_stack_region_id
< 0) {
459 t
->user_stack_base
-= STACK_SIZE
;
461 // we created a region
465 if(t
->user_stack_region_id
< 0)
466 panic("_create_thread: unable to create user stack!\n");
468 // copy the user entry over to the args field in the thread struct
469 // the function this will call will immediately switch the thread into
471 arch_thread_initialize_kthread_stack(t
, &_create_user_thread_kentry
, &thread_entry
, &thread_kthread_exit
);
474 t
->state
= THREAD_STATE_SUSPENDED
;
479 thread_id
user_thread_create_user_thread(char *uname
, proc_id pid
, addr entry
, void *args
)
481 char name
[SYS_MAX_OS_NAME_LEN
];
484 if((addr
)uname
>= KERNEL_BASE
&& (addr
)uname
<= KERNEL_TOP
)
485 return ERR_VM_BAD_USER_MEMORY
;
486 if(entry
>= KERNEL_BASE
&& entry
<= KERNEL_TOP
)
487 return ERR_VM_BAD_USER_MEMORY
;
489 rc
= user_strncpy(name
, uname
, SYS_MAX_OS_NAME_LEN
-1);
492 name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
494 return thread_create_user_thread(name
, pid
, entry
, args
);
497 thread_id
thread_create_user_thread(char *name
, proc_id pid
, addr entry
, void *args
)
499 return _create_thread(name
, pid
, entry
, args
, false);
502 thread_id
thread_create_kernel_thread(const char *name
, int (*func
)(void *), void *args
)
504 return _create_thread(name
, proc_get_kernel_proc()->id
, (addr
)func
, args
, true);
507 static thread_id
thread_create_kernel_thread_etc(const char *name
, int (*func
)(void *), void *args
, struct proc
*p
)
509 return _create_thread(name
, p
->id
, (addr
)func
, args
, true);
512 int thread_suspend_thread(thread_id id
)
517 bool global_resched
= false;
519 state
= int_disable_interrupts();
522 t
= thread_get_current_thread();
524 t
= thread_get_thread_struct_locked(id
);
528 if(t
->proc
== kernel_proc
) {
530 retval
= ERR_NOT_ALLOWED
;
531 } else if(t
->in_kernel
== true) {
532 t
->pending_signals
|= SIG_SUSPEND
;
535 t
->next_state
= THREAD_STATE_SUSPENDED
;
536 global_resched
= true;
540 retval
= ERR_INVALID_HANDLE
;
543 RELEASE_THREAD_LOCK();
544 int_restore_interrupts(state
);
547 smp_send_broadcast_ici(SMP_MSG_RESCHEDULE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_SYNC
);
553 int thread_resume_thread(thread_id id
)
559 state
= int_disable_interrupts();
562 t
= thread_get_thread_struct_locked(id
);
563 if(t
!= NULL
&& t
->state
== THREAD_STATE_SUSPENDED
) {
564 t
->state
= THREAD_STATE_READY
;
565 t
->next_state
= THREAD_STATE_READY
;
567 thread_enqueue_run_q(t
);
570 retval
= ERR_INVALID_HANDLE
;
573 RELEASE_THREAD_LOCK();
574 int_restore_interrupts(state
);
579 int thread_set_priority(thread_id id
, int priority
)
584 // make sure the passed in priority is within bounds
585 if(priority
> THREAD_MAX_PRIORITY
)
586 priority
= THREAD_MAX_PRIORITY
;
587 if(priority
< THREAD_MIN_PRIORITY
)
588 priority
= THREAD_MIN_PRIORITY
;
590 t
= thread_get_current_thread();
592 // it's ourself, so we know we aren't in a run queue, and we can manipulate
593 // our structure directly
594 t
->priority
= priority
;
597 int state
= int_disable_interrupts();
600 t
= thread_get_thread_struct_locked(id
);
602 if(t
->state
== THREAD_STATE_READY
&& t
->priority
!= priority
) {
603 // this thread is in a ready queue right now, so it needs to be reinserted
604 thread_dequeue_id(&run_q
[t
->priority
], t
->id
);
605 t
->priority
= priority
;
606 thread_enqueue_run_q(t
);
608 t
->priority
= priority
;
612 retval
= ERR_INVALID_HANDLE
;
615 RELEASE_THREAD_LOCK();
616 int_restore_interrupts(state
);
622 int thread_get_thread_info(thread_id id
, struct thread_info
*outinfo
)
626 struct thread_info info
;
629 state
= int_disable_interrupts();
632 t
= thread_get_thread_struct_locked(id
);
634 err
= ERR_INVALID_HANDLE
;
638 /* found the thread, copy the data out */
640 info
.owner_proc_id
= t
->proc
->id
;
641 strncpy(info
.name
, t
->name
, SYS_MAX_OS_NAME_LEN
-1);
642 info
.name
[SYS_MAX_OS_NAME_LEN
-1] = '\0';
643 info
.state
= t
->state
;
644 info
.user_stack_base
= t
->user_stack_base
;
645 info
.user_time
= t
->user_time
;
646 info
.kernel_time
= t
->kernel_time
;
651 RELEASE_THREAD_LOCK();
652 int_restore_interrupts(state
);
655 memcpy(outinfo
, &info
, sizeof(info
));
660 int user_thread_get_thread_info(thread_id id
, struct thread_info
*uinfo
)
662 struct thread_info info
;
665 if((addr
)uinfo
>= KERNEL_BASE
&& (addr
)uinfo
<= KERNEL_TOP
) {
666 return ERR_VM_BAD_USER_MEMORY
;
669 err
= thread_get_thread_info(id
, &info
);
673 err2
= user_memcpy(uinfo
, &info
, sizeof(info
));
680 int thread_get_next_thread_info(uint32
*_cookie
, proc_id pid
, struct thread_info
*outinfo
)
685 struct thread_info info
;
690 cookie
= (thread_id
)*_cookie
;
692 state
= int_disable_interrupts();
695 p
= proc_get_proc_struct_locked(pid
);
697 err
= ERR_INVALID_HANDLE
;
701 /* find the next thread in the list of threads in the proc structure */
705 for(t
= p
->thread_list
; t
; t
= t
->proc_next
) {
706 if(t
->id
== cookie
) {
707 /* we found what the last search got us, walk one past the last search */
719 /* found the thread, copy the data out */
721 info
.owner_proc_id
= t
->proc
->id
;
722 strncpy(info
.name
, t
->name
, SYS_MAX_OS_NAME_LEN
-1);
723 info
.name
[SYS_MAX_OS_NAME_LEN
-1] = '\0';
724 info
.state
= t
->state
;
725 info
.user_stack_base
= t
->user_stack_base
;
726 info
.user_time
= t
->user_time
;
727 info
.kernel_time
= t
->kernel_time
;
731 *_cookie
= (uint32
)t
->id
;
735 int_restore_interrupts(state
);
738 memcpy(outinfo
, &info
, sizeof(info
));
743 int user_thread_get_next_thread_info(uint32
*ucookie
, proc_id pid
, struct thread_info
*uinfo
)
745 struct thread_info info
;
749 if((addr
)ucookie
>= KERNEL_BASE
&& (addr
)ucookie
<= KERNEL_TOP
) {
750 return ERR_VM_BAD_USER_MEMORY
;
753 if((addr
)uinfo
>= KERNEL_BASE
&& (addr
)uinfo
<= KERNEL_TOP
) {
754 return ERR_VM_BAD_USER_MEMORY
;
757 err2
= user_memcpy(&cookie
, ucookie
, sizeof(cookie
));
761 err
= thread_get_next_thread_info(&cookie
, pid
, &info
);
765 err2
= user_memcpy(uinfo
, &info
, sizeof(info
));
769 err2
= user_memcpy(ucookie
, &cookie
, sizeof(cookie
));
777 static void _dump_proc_info(struct proc
*p
)
779 dprintf("PROC: %p\n", p
);
780 dprintf("id: 0x%x\n", p
->id
);
781 dprintf("name: '%s'\n", p
->name
);
782 dprintf("next: %p\n", p
->next
);
783 dprintf("num_threads: %d\n", p
->num_threads
);
784 dprintf("state: %d\n", p
->state
);
785 dprintf("pending_signals: 0x%x\n", p
->pending_signals
);
786 dprintf("ioctx: %p\n", p
->ioctx
);
787 dprintf("aspace_id: 0x%x\n", p
->aspace_id
);
788 dprintf("aspace: %p\n", p
->aspace
);
789 dprintf("kaspace: %p\n", p
->kaspace
);
790 dprintf("main_thread: %p\n", p
->main_thread
);
791 dprintf("thread_list: %p\n", p
->thread_list
);
794 static void dump_proc_info(int argc
, char **argv
)
799 struct hash_iterator i
;
802 dprintf("proc: not enough arguments\n");
806 // if the argument looks like a hex number, treat it as such
807 if(strlen(argv
[1]) > 2 && argv
[1][0] == '0' && argv
[1][1] == 'x') {
808 num
= atoul(argv
[1]);
809 if(num
> vm_get_kernel_aspace()->virtual_map
.base
) {
811 _dump_proc_info((struct proc
*)num
);
818 // walk through the thread list, trying to match name or id
819 hash_open(proc_hash
, &i
);
820 while((p
= hash_next(proc_hash
, &i
)) != NULL
) {
821 if((p
->name
&& strcmp(argv
[1], p
->name
) == 0) || p
->id
== id
) {
826 hash_close(proc_hash
, &i
, false);
830 static const char *state_to_text(int state
)
833 case THREAD_STATE_READY
:
835 case THREAD_STATE_RUNNING
:
837 case THREAD_STATE_WAITING
:
839 case THREAD_STATE_SUSPENDED
:
841 case THREAD_STATE_FREE_ON_RESCHED
:
843 case THREAD_STATE_BIRTH
:
850 static struct thread
*last_thread_dumped
= NULL
;
852 static void _dump_thread_info(struct thread
*t
)
854 dprintf("THREAD: %p\n", t
);
855 dprintf("id: 0x%x\n", t
->id
);
856 dprintf("name: '%s'\n", t
->name
);
857 dprintf("all_next: %p\nproc_next: %p\nq_next: %p\n",
858 t
->all_next
, t
->proc_next
, t
->q_next
);
859 dprintf("priority: 0x%x\n", t
->priority
);
860 dprintf("state: %s\n", state_to_text(t
->state
));
861 dprintf("next_state: %s\n", state_to_text(t
->next_state
));
862 dprintf("cpu: %p ", t
->cpu
);
864 dprintf("(%d)\n", t
->cpu
->info
.cpu_num
);
867 dprintf("pending_signals: 0x%x\n", t
->pending_signals
);
868 dprintf("in_kernel: %d\n", t
->in_kernel
);
869 dprintf("sem_blocking:0x%x\n", t
->sem_blocking
);
870 dprintf("sem_count: 0x%x\n", t
->sem_count
);
871 dprintf("sem_deleted_retcode: 0x%x\n", t
->sem_deleted_retcode
);
872 dprintf("sem_errcode: 0x%x\n", t
->sem_errcode
);
873 dprintf("sem_flags: 0x%x\n", t
->sem_flags
);
874 dprintf("fault_handler: 0x%lx\n", t
->fault_handler
);
875 dprintf("args: %p\n", t
->args
);
876 dprintf("entry: 0x%lx\n", t
->entry
);
877 dprintf("proc: %p\n", t
->proc
);
878 dprintf("return_code_sem: 0x%x\n", t
->return_code_sem
);
879 dprintf("kernel_stack_region_id: 0x%x\n", t
->kernel_stack_region_id
);
880 dprintf("kernel_stack_base: 0x%lx\n", t
->kernel_stack_base
);
881 dprintf("user_stack_region_id: 0x%x\n", t
->user_stack_region_id
);
882 dprintf("user_stack_base: 0x%lx\n", t
->user_stack_base
);
883 dprintf("kernel_time: %Ld\n", t
->kernel_time
);
884 dprintf("user_time: %Ld\n", t
->user_time
);
885 dprintf("architecture dependant section:\n");
886 arch_thread_dump_info(&t
->arch_info
);
888 last_thread_dumped
= t
;
891 static void dump_thread_info(int argc
, char **argv
)
896 struct hash_iterator i
;
899 dprintf("thread: not enough arguments\n");
903 // if the argument looks like a hex number, treat it as such
904 if(strlen(argv
[1]) > 2 && argv
[1][0] == '0' && argv
[1][1] == 'x') {
905 num
= atoul(argv
[1]);
906 if(num
> vm_get_kernel_aspace()->virtual_map
.base
) {
908 _dump_thread_info((struct thread
*)num
);
915 // walk through the thread list, trying to match name or id
916 hash_open(thread_hash
, &i
);
917 while((t
= hash_next(thread_hash
, &i
)) != NULL
) {
918 if((t
->name
&& strcmp(argv
[1], t
->name
) == 0) || t
->id
== id
) {
919 _dump_thread_info(t
);
923 hash_close(thread_hash
, &i
, false);
926 static void dump_thread_list(int argc
, char **argv
)
929 struct hash_iterator i
;
931 hash_open(thread_hash
, &i
);
932 while((t
= hash_next(thread_hash
, &i
)) != NULL
) {
935 dprintf("\t%32s", t
->name
);
937 dprintf("\t%32s", "<NULL>");
938 dprintf("\t0x%x", t
->id
);
939 dprintf("\t%16s", state_to_text(t
->state
));
941 dprintf("\t%d", t
->cpu
->info
.cpu_num
);
944 dprintf("\t0x%lx\n", t
->kernel_stack_base
);
946 hash_close(thread_hash
, &i
, false);
949 static void dump_next_thread_in_q(int argc
, char **argv
)
951 struct thread
*t
= last_thread_dumped
;
954 dprintf("no thread previously dumped. Examine a thread first.\n");
958 dprintf("next thread in queue after thread @ %p\n", t
);
959 if(t
->q_next
!= NULL
) {
960 _dump_thread_info(t
->q_next
);
966 static void dump_next_thread_in_all_list(int argc
, char **argv
)
968 struct thread
*t
= last_thread_dumped
;
971 dprintf("no thread previously dumped. Examine a thread first.\n");
975 dprintf("next thread in global list after thread @ %p\n", t
);
976 if(t
->all_next
!= NULL
) {
977 _dump_thread_info(t
->all_next
);
983 static void dump_next_thread_in_proc(int argc
, char **argv
)
985 struct thread
*t
= last_thread_dumped
;
988 dprintf("no thread previously dumped. Examine a thread first.\n");
992 dprintf("next thread in proc after thread @ %p\n", t
);
993 if(t
->proc_next
!= NULL
) {
994 _dump_thread_info(t
->proc_next
);
1000 static int get_death_stack(void)
1006 sem_acquire(death_stack_sem
, 1);
1008 // grap the thread lock, find a free spot and release
1009 state
= int_disable_interrupts();
1011 bit
= death_stack_bitmap
;
1012 bit
= (~bit
)&~((~bit
)-1);
1013 death_stack_bitmap
|= bit
;
1014 RELEASE_THREAD_LOCK();
1019 panic("get_death_stack: couldn't find free stack!\n");
1021 if( bit
& (bit
-1)) {
1022 panic("get_death_stack: impossible bitmap result!\n");
1033 // dprintf("get_death_stack: returning 0x%lx\n", death_stacks[i].address);
1038 static void put_death_stack_and_reschedule(unsigned int index
)
1040 // dprintf("put_death_stack...: passed %d\n", index);
1042 if(index
>= num_death_stacks
)
1043 panic("put_death_stack: passed invalid stack index %d\n", index
);
1045 if(!(death_stack_bitmap
& (1 << index
)))
1046 panic("put_death_stack: passed invalid stack index %d\n", index
);
1048 int_disable_interrupts();
1051 death_stack_bitmap
&= ~(1 << index
);
1053 sem_release_etc(death_stack_sem
, 1, SEM_FLAG_NO_RESCHED
);
1058 int thread_init(kernel_args
*ka
)
1063 // dprintf("thread_init: entry\n");
1065 // create the process hash table
1066 proc_hash
= hash_init(15, (addr
)&kernel_proc
->next
- (addr
)kernel_proc
,
1067 &proc_struct_compare
, &proc_struct_hash
);
1069 // create the kernel process
1070 kernel_proc
= create_proc_struct("kernel", true);
1071 if(kernel_proc
== NULL
)
1072 panic("could not create kernel proc!\n");
1073 kernel_proc
->state
= PROC_STATE_NORMAL
;
1075 kernel_proc
->ioctx
= vfs_new_ioctx(NULL
);
1076 if(kernel_proc
->ioctx
== NULL
)
1077 panic("could not create ioctx for kernel proc!\n");
1079 // stick it in the process hash
1080 hash_insert(proc_hash
, kernel_proc
);
1082 // create the thread hash table
1083 thread_hash
= hash_init(15, (addr
)&t
->all_next
- (addr
)t
,
1084 &thread_struct_compare
, &thread_struct_hash
);
1086 // zero out the run queues
1087 memset(run_q
, 0, sizeof(run_q
));
1089 // zero out the dead thread structure q
1090 memset(&dead_q
, 0, sizeof(dead_q
));
1092 // allocate a snooze sem
1093 snooze_sem
= sem_create(0, "snooze sem");
1094 if(snooze_sem
< 0) {
1095 panic("error creating snooze sem\n");
1099 // create an idle thread for each cpu
1100 for(i
=0; i
<ka
->num_cpus
; i
++) {
1104 sprintf(temp
, "idle_thread%d", i
);
1105 t
= create_thread_struct(temp
);
1107 panic("error creating idle thread struct\n");
1108 return ERR_NO_MEMORY
;
1110 t
->proc
= proc_get_kernel_proc();
1111 t
->priority
= THREAD_IDLE_PRIORITY
;
1112 t
->state
= THREAD_STATE_RUNNING
;
1113 t
->next_state
= THREAD_STATE_READY
;
1114 sprintf(temp
, "idle_thread%d_kstack", i
);
1115 t
->kernel_stack_region_id
= vm_find_region_by_name(vm_get_kernel_aspace_id(), temp
);
1116 region
= vm_get_region_by_id(t
->kernel_stack_region_id
);
1118 panic("error finding idle kstack region\n");
1120 t
->kernel_stack_base
= region
->base
;
1121 vm_put_region(region
);
1122 hash_insert(thread_hash
, t
);
1123 insert_thread_into_proc(t
->proc
, t
);
1124 idle_threads
[i
] = t
;
1126 arch_thread_set_current_thread(t
);
1130 // create a set of death stacks
1131 num_death_stacks
= smp_get_num_cpus();
1132 if(num_death_stacks
> 8*sizeof(death_stack_bitmap
)) {
1134 * clamp values for really beefy machines
1136 num_death_stacks
= 8*sizeof(death_stack_bitmap
);
1138 death_stack_bitmap
= 0;
1139 death_stacks
= (struct death_stack
*)kmalloc(num_death_stacks
* sizeof(struct death_stack
));
1140 if(death_stacks
== NULL
) {
1141 panic("error creating death stacks\n");
1142 return ERR_NO_MEMORY
;
1147 for(i
=0; i
<num_death_stacks
; i
++) {
1148 sprintf(temp
, "death_stack%d", i
);
1149 death_stacks
[i
].rid
= vm_create_anonymous_region(vm_get_kernel_aspace_id(), temp
,
1150 (void **)&death_stacks
[i
].address
,
1151 REGION_ADDR_ANY_ADDRESS
, KSTACK_SIZE
, REGION_WIRING_WIRED
, LOCK_RW
|LOCK_KERNEL
);
1152 if(death_stacks
[i
].rid
< 0) {
1153 panic("error creating death stacks\n");
1154 return death_stacks
[i
].rid
;
1156 death_stacks
[i
].in_use
= false;
1159 death_stack_sem
= sem_create(num_death_stacks
, "death_stack_noavail_sem");
1161 // set up some debugger commands
1162 dbg_add_command(dump_thread_list
, "threads", "list all threads");
1163 dbg_add_command(dump_thread_info
, "thread", "list info about a particular thread");
1164 dbg_add_command(dump_next_thread_in_q
, "next_q", "dump the next thread in the queue of last thread viewed");
1165 dbg_add_command(dump_next_thread_in_all_list
, "next_all", "dump the next thread in the global list of the last thread viewed");
1166 dbg_add_command(dump_next_thread_in_proc
, "next_proc", "dump the next thread in the process of the last thread viewed");
1167 dbg_add_command(dump_proc_info
, "proc", "list info about a particular process");
1172 int thread_init_percpu(int cpu_num
)
1174 arch_thread_set_current_thread(idle_threads
[cpu_num
]);
1178 // this starts the scheduler. Must be run under the context of
1179 // the initial idle thread.
1180 void thread_start_threading(void)
1184 // XXX may not be the best place for this
1185 // invalidate all of the other processors' TLB caches
1186 state
= int_disable_interrupts();
1187 arch_cpu_global_TLB_invalidate();
1188 smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_SYNC
);
1189 int_restore_interrupts(state
);
1191 // start the other processors
1192 smp_send_broadcast_ici(SMP_MSG_RESCHEDULE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_ASYNC
);
1194 state
= int_disable_interrupts();
1199 RELEASE_THREAD_LOCK();
1200 int_restore_interrupts(state
);
1203 int user_thread_snooze(bigtime_t time
)
1205 thread_snooze(time
);
1209 void thread_snooze(bigtime_t time
)
1211 sem_acquire_etc(snooze_sem
, 1, SEM_FLAG_TIMEOUT
, time
, NULL
);
// this function gets run by a new thread before anything else
static void thread_entry(void)
{
	// simulates the thread spinlock release that would occur if the thread had been
	// rescheded from. The resched didn't happen because the thread is new.
	RELEASE_THREAD_LOCK();
	int_enable_interrupts(); // this essentially simulates a return-from-interrupt
}
1223 // used to pass messages between thread_exit and thread_exit2
1224 struct thread_exit_args
{
1226 region_id old_kernel_stack
;
1228 unsigned int death_stack
;
1231 static void thread_exit2(void *_args
)
1233 struct thread_exit_args args
;
1236 // copy the arguments over, since the source is probably on the kernel stack we're about to delete
1237 memcpy(&args
, _args
, sizeof(struct thread_exit_args
));
1239 // restore the interrupts
1240 int_restore_interrupts(args
.int_state
);
1242 // dprintf("thread_exit2, running on death stack 0x%lx\n", args.t->kernel_stack_base);
1244 // delete the old kernel stack region
1245 // dprintf("thread_exit2: deleting old kernel stack id 0x%x for thread 0x%x\n", args.old_kernel_stack, args.t->id);
1246 vm_delete_region(vm_get_kernel_aspace_id(), args
.old_kernel_stack
);
1248 // dprintf("thread_exit2: removing thread 0x%x from global lists\n", args.t->id);
1250 // remove this thread from all of the global lists
1251 int_disable_interrupts();
1253 remove_thread_from_proc(kernel_proc
, args
.t
);
1254 RELEASE_PROC_LOCK();
1256 hash_remove(thread_hash
, args
.t
);
1257 RELEASE_THREAD_LOCK();
1259 // dprintf("thread_exit2: done removing thread from lists\n");
1261 // set the next state to be gone. Will return the thread structure to a ready pool upon reschedule
1262 args
.t
->next_state
= THREAD_STATE_FREE_ON_RESCHED
;
1264 // return the death stack and reschedule one last time
1265 put_death_stack_and_reschedule(args
.death_stack
);
1266 // never get to here
1267 panic("thread_exit2: made it where it shouldn't have!\n");
1270 void thread_exit(int retcode
)
1273 struct thread
*t
= thread_get_current_thread();
1274 struct proc
*p
= t
->proc
;
1275 bool delete_proc
= false;
1276 unsigned int death_stack
;
1278 dprintf("thread 0x%x exiting w/return code 0x%x\n", t
->id
, retcode
);
1280 // boost our priority to get this over with
1281 thread_set_priority(t
->id
, THREAD_HIGH_PRIORITY
);
1283 // delete the user stack region first
1284 if(p
->aspace_id
>= 0 && t
->user_stack_region_id
>= 0) {
1285 region_id rid
= t
->user_stack_region_id
;
1286 t
->user_stack_region_id
= -1;
1287 vm_delete_region(p
->aspace_id
, rid
);
1290 if(p
!= kernel_proc
) {
1291 // remove this thread from the current process and add it to the kernel
1292 // put the thread into the kernel proc until it dies
1293 state
= int_disable_interrupts();
1295 remove_thread_from_proc(p
, t
);
1296 insert_thread_into_proc(kernel_proc
, t
);
1297 if(p
->main_thread
== t
) {
1298 // this was main thread in this process
1300 hash_remove(proc_hash
, p
);
1301 p
->state
= PROC_STATE_DEATH
;
1303 RELEASE_PROC_LOCK();
1304 // swap address spaces, to make sure we're running on the kernel's pgdir
1305 vm_aspace_swap(kernel_proc
->kaspace
);
1306 int_restore_interrupts(state
);
1308 // dprintf("thread_exit: thread 0x%x now a kernel thread!\n", t->id);
1311 // delete the process
1313 if(p
->num_threads
> 0) {
1314 // there are other threads still in this process,
1315 // cycle through and signal kill on each of the threads
1316 // XXX this can be optimized. There's got to be a better solution.
1317 struct thread
*temp_thread
;
1319 state
= int_disable_interrupts();
1321 // we can safely walk the list because of the lock. no new threads can be created
1322 // because of the PROC_STATE_DEATH flag on the process
1323 temp_thread
= p
->thread_list
;
1324 while(temp_thread
) {
1325 struct thread
*next
= temp_thread
->proc_next
;
1326 thread_kill_thread_nowait(temp_thread
->id
);
1329 RELEASE_PROC_LOCK();
1330 int_restore_interrupts(state
);
1332 // Now wait for all of the threads to die
1333 // XXX block on a semaphore
1334 while((volatile int)p
->num_threads
> 0) {
1335 thread_snooze(10000); // 10 ms
1338 vm_put_aspace(p
->aspace
);
1339 vm_delete_aspace(p
->aspace_id
);
1340 port_delete_owned_ports(p
->id
);
1341 sem_delete_owned_sems(p
->id
);
1342 vfs_free_ioctx(p
->ioctx
);
1346 // delete the sem that others will use to wait on us and get the retcode
1348 sem_id s
= t
->return_code_sem
;
1350 t
->return_code_sem
= -1;
1351 sem_delete_etc(s
, retcode
);
1354 death_stack
= get_death_stack();
1356 struct thread_exit_args args
;
1359 args
.old_kernel_stack
= t
->kernel_stack_region_id
;
1360 args
.death_stack
= death_stack
;
1362 // disable the interrupts. Must remain disabled until the kernel stack pointer can be officially switched
1363 args
.int_state
= int_disable_interrupts();
1365 // set the new kernel stack officially to the death stack, wont be really switched until
1366 // the next function is called. This bookkeeping must be done now before a context switch
1367 // happens, or the processor will interrupt to the old stack
1368 t
->kernel_stack_region_id
= death_stacks
[death_stack
].rid
;
1369 t
->kernel_stack_base
= death_stacks
[death_stack
].address
;
1371 // we will continue in thread_exit2(), on the new stack
1372 arch_thread_switch_kstack_and_call(t
, t
->kernel_stack_base
+ KSTACK_SIZE
, thread_exit2
, &args
);
1375 panic("never can get here\n");
1378 static int _thread_kill_thread(thread_id id
, bool wait_on
)
1384 // dprintf("_thread_kill_thread: id %d, wait_on %d\n", id, wait_on);
1386 state
= int_disable_interrupts();
1389 t
= thread_get_thread_struct_locked(id
);
1391 if(t
->proc
== kernel_proc
) {
1393 rc
= ERR_NOT_ALLOWED
;
1395 deliver_signal(t
, SIG_KILL
);
1397 if(t
->id
== thread_get_current_thread()->id
)
1398 wait_on
= false; // can't wait on ourself
1401 rc
= ERR_INVALID_HANDLE
;
1404 RELEASE_THREAD_LOCK();
1405 int_restore_interrupts(state
);
1410 thread_wait_on_thread(id
, NULL
);
1415 int thread_kill_thread(thread_id id
)
1417 return _thread_kill_thread(id
, true);
1420 int thread_kill_thread_nowait(thread_id id
)
1422 return _thread_kill_thread(id
, false);
1425 static void thread_kthread_exit(void)
1430 int user_thread_wait_on_thread(thread_id id
, int *uretcode
)
1435 if((addr
)uretcode
>= KERNEL_BASE
&& (addr
)uretcode
<= KERNEL_TOP
)
1436 return ERR_VM_BAD_USER_MEMORY
;
1438 rc
= thread_wait_on_thread(id
, &retcode
);
1440 rc2
= user_memcpy(uretcode
, &retcode
, sizeof(retcode
));
1447 int thread_wait_on_thread(thread_id id
, int *retcode
)
1454 state
= int_disable_interrupts();
1457 t
= thread_get_thread_struct_locked(id
);
1459 sem
= t
->return_code_sem
;
1461 sem
= ERR_INVALID_HANDLE
;
1464 RELEASE_THREAD_LOCK();
1465 int_restore_interrupts(state
);
1467 rc
= sem_acquire_etc(sem
, 1, 0, 0, retcode
);
1469 /* This thread died the way it should, dont ripple a non-error up */
1470 if (rc
== ERR_SEM_DELETED
)
1476 int user_proc_wait_on_proc(proc_id id
, int *uretcode
)
1481 if((addr
)uretcode
>= KERNEL_BASE
&& (addr
)uretcode
<= KERNEL_TOP
)
1482 return ERR_VM_BAD_USER_MEMORY
;
1484 rc
= proc_wait_on_proc(id
, &retcode
);
1488 rc2
= user_memcpy(uretcode
, &retcode
, sizeof(retcode
));
1495 int proc_wait_on_proc(proc_id id
, int *retcode
)
1501 state
= int_disable_interrupts();
1503 p
= proc_get_proc_struct_locked(id
);
1504 if(p
&& p
->main_thread
) {
1505 tid
= p
->main_thread
->id
;
1507 tid
= ERR_INVALID_HANDLE
;
1509 RELEASE_PROC_LOCK();
1510 int_restore_interrupts(state
);
1515 return thread_wait_on_thread(tid
, retcode
);
1518 struct thread
*thread_get_thread_struct(thread_id id
)
1523 state
= int_disable_interrupts();
1526 t
= thread_get_thread_struct_locked(id
);
1528 RELEASE_THREAD_LOCK();
1529 int_restore_interrupts(state
);
1534 static struct thread
*thread_get_thread_struct_locked(thread_id id
)
1536 struct thread_key key
;
1540 return hash_lookup(thread_hash
, &key
);
1543 static struct proc
*proc_get_proc_struct(proc_id id
)
1548 state
= int_disable_interrupts();
1551 p
= proc_get_proc_struct_locked(id
);
1553 RELEASE_PROC_LOCK();
1554 int_restore_interrupts(state
);
1559 static struct proc
*proc_get_proc_struct_locked(proc_id id
)
1561 struct proc_key key
;
1565 return hash_lookup(proc_hash
, &key
);
1568 static void thread_context_switch(struct thread
*t_from
, struct thread
*t_to
)
1572 // track kernel time
1573 now
= system_time();
1574 t_from
->kernel_time
+= now
- t_from
->last_time
;
1575 t_to
->last_time
= now
;
1577 t_to
->cpu
= t_from
->cpu
;
1578 arch_thread_set_current_thread(t_to
);
1580 arch_thread_context_switch(t_from
, t_to
);
// Cheap linear-congruential pseudo-random generator used by the scheduler
// for probabilistic queue skipping. Seeded lazily from the system clock.
static int _rand(void)
{
	static int next = 0;

	if(next == 0)
		next = system_time();

	next = next * 1103515245 + 12345;
	return((next >> 16) & 0x7FFF);
}
1594 static int reschedule_event(void *unused
)
1596 // this function is called as a result of the timer event set by the scheduler
1597 // returning this causes a reschedule on the timer event
1598 thread_get_current_thread()->cpu
->info
.preempted
= 1;
1599 return INT_RESCHEDULE
;
1602 // NOTE: expects thread_spinlock to be held
1603 void thread_resched(void)
1605 struct thread
*next_thread
= NULL
;
1606 int last_thread_pri
= -1;
1607 struct thread
*old_thread
= thread_get_current_thread();
1610 struct timer_event
*quantum_timer
;
1612 // dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), thread_get_current_thread());
1614 switch(old_thread
->next_state
) {
1615 case THREAD_STATE_RUNNING
:
1616 case THREAD_STATE_READY
:
1617 // dprintf("enqueueing thread 0x%x into run q. pri = %d\n", old_thread, old_thread->priority);
1618 thread_enqueue_run_q(old_thread
);
1620 case THREAD_STATE_SUSPENDED
:
1621 dprintf("suspending thread 0x%x\n", old_thread
->id
);
1623 case THREAD_STATE_FREE_ON_RESCHED
:
1624 thread_enqueue(old_thread
, &dead_q
);
1627 // dprintf("not enqueueing thread 0x%x into run q. next_state = %d\n", old_thread, old_thread->next_state);
1630 old_thread
->state
= old_thread
->next_state
;
1632 // search the real-time queue
1633 for(i
= THREAD_MAX_RT_PRIORITY
; i
>= THREAD_MIN_RT_PRIORITY
; i
--) {
1634 next_thread
= thread_dequeue_run_q(i
);
1639 // search the regular queue
1640 for(i
= THREAD_MAX_PRIORITY
; i
> THREAD_IDLE_PRIORITY
; i
--) {
1641 next_thread
= thread_lookat_run_q(i
);
1642 if(next_thread
!= NULL
) {
1643 // skip it sometimes
1644 if(_rand() > 0x3000) {
1645 next_thread
= thread_dequeue_run_q(i
);
1648 last_thread_pri
= i
;
1652 if(next_thread
== NULL
) {
1653 if(last_thread_pri
!= -1) {
1654 next_thread
= thread_dequeue_run_q(last_thread_pri
);
1655 if(next_thread
== NULL
)
1656 panic("next_thread == NULL! last_thread_pri = %d\n", last_thread_pri
);
1658 next_thread
= thread_dequeue_run_q(THREAD_IDLE_PRIORITY
);
1659 if(next_thread
== NULL
)
1660 panic("next_thread == NULL! no idle priorities!\n");
1665 next_thread
->state
= THREAD_STATE_RUNNING
;
1666 next_thread
->next_state
= THREAD_STATE_READY
;
1668 // XXX should only reset the quantum timer if we are switching to a new thread,
1669 // or we got here as a result of a quantum expire.
1671 // XXX calculate quantum
1674 // get the quantum timer for this cpu
1675 quantum_timer
= &old_thread
->cpu
->info
.quantum_timer
;
1676 if(!old_thread
->cpu
->info
.preempted
) {
1677 _local_timer_cancel_event(old_thread
->cpu
->info
.cpu_num
, quantum_timer
);
1679 old_thread
->cpu
->info
.preempted
= 0;
1680 timer_setup_timer(&reschedule_event
, NULL
, quantum_timer
);
1681 timer_set_event(quantum
, TIMER_MODE_ONESHOT
, quantum_timer
);
1683 if(next_thread
!= old_thread
) {
1684 // dprintf("thread_resched: cpu %d switching from thread %d to %d\n",
1685 // smp_get_current_cpu(), old_thread->id, next_thread->id);
1686 thread_context_switch(old_thread
, next_thread
);
1690 static int proc_struct_compare(void *_p
, const void *_key
)
1692 struct proc
*p
= _p
;
1693 const struct proc_key
*key
= _key
;
1695 if(p
->id
== key
->id
) return 0;
1699 static unsigned int proc_struct_hash(void *_p
, const void *_key
, unsigned int range
)
1701 struct proc
*p
= _p
;
1702 const struct proc_key
*key
= _key
;
1705 return (p
->id
% range
);
1707 return (key
->id
% range
);
1710 struct proc
*proc_get_kernel_proc(void)
1715 proc_id
proc_get_kernel_proc_id(void)
1720 return kernel_proc
->id
;
1723 proc_id
proc_get_current_proc_id(void)
1725 return thread_get_current_thread()->proc
->id
;
1728 static struct proc
*create_proc_struct(const char *name
, bool kernel
)
1732 p
= (struct proc
*)kmalloc(sizeof(struct proc
));
1735 p
->id
= atomic_add(&next_proc_id
, 1);
1736 strncpy(&p
->name
[0], name
, SYS_MAX_OS_NAME_LEN
-1);
1737 p
->name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
1742 p
->kaspace
= vm_get_kernel_aspace();
1743 vm_put_aspace(p
->kaspace
);
1744 p
->thread_list
= NULL
;
1745 p
->main_thread
= NULL
;
1746 p
->state
= PROC_STATE_BIRTH
;
1747 p
->pending_signals
= SIG_NONE
;
1749 if(arch_proc_init_proc_struct(p
, kernel
) < 0)
// Release a proc structure created by create_proc_struct.
// NOTE(review): body was elided in this listing; presumably just frees
// the structure — confirm against the original source.
static void delete_proc_struct(struct proc *p)
{
	kfree(p);
}
1765 int proc_get_proc_info(proc_id id
, struct proc_info
*outinfo
)
1769 struct proc_info info
;
1772 state
= int_disable_interrupts();
1775 p
= proc_get_proc_struct_locked(id
);
1777 err
= ERR_INVALID_HANDLE
;
1781 /* found the proc, copy the data out */
1783 strncpy(info
.name
, p
->name
, SYS_MAX_OS_NAME_LEN
-1);
1784 info
.name
[SYS_MAX_OS_NAME_LEN
-1] = '\0';
1785 info
.state
= p
->state
;
1786 info
.num_threads
= p
->num_threads
;
1791 RELEASE_PROC_LOCK();
1792 int_restore_interrupts(state
);
1795 memcpy(outinfo
, &info
, sizeof(info
));
1800 int user_proc_get_proc_info(proc_id id
, struct proc_info
*uinfo
)
1802 struct proc_info info
;
1805 if((addr
)uinfo
>= KERNEL_BASE
&& (addr
)uinfo
<= KERNEL_TOP
) {
1806 return ERR_VM_BAD_USER_MEMORY
;
1809 err
= proc_get_proc_info(id
, &info
);
1813 err2
= user_memcpy(uinfo
, &info
, sizeof(info
));
1820 int proc_get_next_proc_info(uint32
*cookie
, struct proc_info
*outinfo
)
1823 struct proc_info info
;
1826 struct hash_iterator i
;
1827 proc_id id
= (proc_id
)*cookie
;
1829 state
= int_disable_interrupts();
1832 hash_open(proc_hash
, &i
);
1833 while((p
= hash_next(proc_hash
, &i
)) != NULL
) {
1835 break; // initial search, return the first proc
1837 // we found the last proc that was looked at, increment to the next one
1838 p
= hash_next(proc_hash
, &i
);
1843 err
= ERR_NO_MORE_HANDLES
;
1847 // we have the proc structure, copy the data out of it
1849 strncpy(info
.name
, p
->name
, SYS_MAX_OS_NAME_LEN
-1);
1850 info
.name
[SYS_MAX_OS_NAME_LEN
-1] = '\0';
1851 info
.state
= p
->state
;
1852 info
.num_threads
= p
->num_threads
;
1856 *cookie
= (uint32
)p
->id
;
1859 RELEASE_PROC_LOCK();
1860 int_restore_interrupts(state
);
1863 memcpy(outinfo
, &info
, sizeof(info
));
1868 int user_proc_get_next_proc_info(uint32
*ucookie
, struct proc_info
*uinfo
)
1870 struct proc_info info
;
1874 if((addr
)ucookie
>= KERNEL_BASE
&& (addr
)ucookie
<= KERNEL_TOP
) {
1875 return ERR_VM_BAD_USER_MEMORY
;
1878 if((addr
)uinfo
>= KERNEL_BASE
&& (addr
)uinfo
<= KERNEL_TOP
) {
1879 return ERR_VM_BAD_USER_MEMORY
;
1882 err2
= user_memcpy(&cookie
, ucookie
, sizeof(cookie
));
1886 err
= proc_get_next_proc_info(&cookie
, &info
);
1890 err2
= user_memcpy(uinfo
, &info
, sizeof(info
));
1894 err2
= user_memcpy(ucookie
, &cookie
, sizeof(cookie
));
1901 static int get_arguments_data_size(char **args
,int argc
)
1906 for(cnt
= 0; cnt
< argc
; cnt
++)
1907 tot_size
+= strlen(args
[cnt
]) + 1;
1908 tot_size
+= (argc
+ 1) * sizeof(char *);
1910 return tot_size
+ sizeof(struct uspace_prog_args_t
);
1913 static int proc_create_proc2(void *args
)
1918 struct proc_arg
*pargs
= args
;
1921 char ustack_name
[128];
1925 struct uspace_prog_args_t
*uspa
;
1928 t
= thread_get_current_thread();
1931 dprintf("proc_create_proc2: entry thread %d\n", t
->id
);
1933 // create an initial primary stack region
1935 tot_top_size
= STACK_SIZE
+ PAGE_ALIGN(get_arguments_data_size(pargs
->args
,pargs
->argc
));
1936 t
->user_stack_base
= ((USER_STACK_REGION
- tot_top_size
) + USER_STACK_REGION_SIZE
);
1937 sprintf(ustack_name
, "%s_primary_stack", p
->name
);
1938 t
->user_stack_region_id
= vm_create_anonymous_region(p
->aspace_id
, ustack_name
, (void **)&t
->user_stack_base
,
1939 REGION_ADDR_EXACT_ADDRESS
, tot_top_size
, REGION_WIRING_LAZY
, LOCK_RW
);
1940 if(t
->user_stack_region_id
< 0) {
1941 panic("proc_create_proc2: could not create default user stack region\n");
1942 return t
->user_stack_region_id
;
1945 uspa
= (struct uspace_prog_args_t
*)(t
->user_stack_base
+ STACK_SIZE
);
1946 uargs
= (char **)(uspa
+ 1);
1947 udest
= (char *)(uargs
+ pargs
->argc
+ 1);
1948 // dprintf("addr: stack base=0x%x uargs = 0x%x udest=0x%x tot_top_size=%d \n\n",t->user_stack_base,uargs,udest,tot_top_size);
1950 for(cnt
= 0;cnt
< pargs
->argc
;cnt
++){
1952 user_strcpy(udest
, pargs
->args
[cnt
]);
1953 udest
+= strlen(pargs
->args
[cnt
]) + 1;
1957 user_memcpy(uspa
->prog_name
, p
->name
, sizeof(uspa
->prog_name
));
1958 user_memcpy(uspa
->prog_path
, pargs
->path
, sizeof(uspa
->prog_path
));
1964 if(pargs
->args
!= NULL
)
1965 free_arg_list(pargs
->args
,pargs
->argc
);
1968 dprintf("proc_create_proc2: loading elf binary '%s'\n", path
);
1970 err
= elf_load_uspace("/boot/libexec/rld.so", p
, 0, &entry
);
1972 // XXX clean up proc
1980 dprintf("proc_create_proc2: loaded elf. entry = 0x%lx\n", entry
);
1982 p
->state
= PROC_STATE_NORMAL
;
1984 // jump to the entry point in user space
1985 arch_thread_enter_uspace(entry
, uspa
, t
->user_stack_base
+ STACK_SIZE
);
1991 proc_id
proc_create_proc(const char *path
, const char *name
, char **args
, int argc
, int priority
)
1999 struct proc_arg
*pargs
;
2001 dprintf("proc_create_proc: entry '%s', name '%s' args = %p argc = %d\n", path
, name
, args
, argc
);
2003 p
= create_proc_struct(name
, false);
2005 return ERR_NO_MEMORY
;
2009 state
= int_disable_interrupts();
2011 hash_insert(proc_hash
, p
);
2012 RELEASE_PROC_LOCK();
2013 int_restore_interrupts(state
);
2015 // copy the args over
2016 pargs
= kmalloc(sizeof(struct proc_arg
));
2018 err
= ERR_NO_MEMORY
;
2021 pargs
->path
= kstrdup(path
);
2022 if(pargs
->path
== NULL
){
2023 err
= ERR_NO_MEMORY
;
2029 // create a new ioctx for this process
2030 p
->ioctx
= vfs_new_ioctx(thread_get_current_thread()->proc
->ioctx
);
2032 err
= ERR_NO_MEMORY
;
2036 // create an address space for this process
2037 p
->aspace_id
= vm_create_aspace(p
->name
, USER_BASE
, USER_SIZE
, false);
2038 if(p
->aspace_id
< 0) {
2042 p
->aspace
= vm_get_aspace_by_id(p
->aspace_id
);
2044 // create a kernel thread, but under the context of the new process
2045 tid
= thread_create_kernel_thread_etc(name
, proc_create_proc2
, pargs
, p
);
2051 thread_resume_thread(tid
);
2056 vm_put_aspace(p
->aspace
);
2057 vm_delete_aspace(p
->aspace_id
);
2059 vfs_free_ioctx(p
->ioctx
);
2065 // remove the proc structure from the proc hash table and delete the proc structure
2066 state
= int_disable_interrupts();
2068 hash_remove(proc_hash
, p
);
2069 RELEASE_PROC_LOCK();
2070 int_restore_interrupts(state
);
2071 delete_proc_struct(p
);
2076 proc_id
user_proc_create_proc(const char *upath
, const char *uname
, char **args
, int argc
, int priority
)
2078 char path
[SYS_MAX_PATH_LEN
];
2079 char name
[SYS_MAX_OS_NAME_LEN
];
2083 dprintf("user_proc_create_proc : argc=%d \n",argc
);
2085 if((addr
)upath
>= KERNEL_BASE
&& (addr
)upath
<= KERNEL_TOP
)
2086 return ERR_VM_BAD_USER_MEMORY
;
2087 if((addr
)uname
>= KERNEL_BASE
&& (addr
)uname
<= KERNEL_TOP
)
2088 return ERR_VM_BAD_USER_MEMORY
;
2090 rc
= user_copy_arg_list(args
, argc
, &kargs
);
2094 rc
= user_strncpy(path
, upath
, SYS_MAX_PATH_LEN
-1);
2098 path
[SYS_MAX_PATH_LEN
-1] = 0;
2100 rc
= user_strncpy(name
, uname
, SYS_MAX_OS_NAME_LEN
-1);
2104 name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
2106 return proc_create_proc(path
, name
, kargs
, argc
, priority
);
2108 free_arg_list(kargs
,argc
);
2112 int proc_kill_proc(proc_id id
)
2120 state
= int_disable_interrupts();
2123 p
= proc_get_proc_struct_locked(id
);
2125 tid
= p
->main_thread
->id
;
2127 retval
= ERR_INVALID_HANDLE
;
2130 RELEASE_PROC_LOCK();
2131 int_restore_interrupts(state
);
2135 // just kill the main thread in the process. The cleanup code there will
2136 // take care of the process
2137 return thread_kill_thread(tid
);
2140 // sets the pending signal flag on a thread and possibly does some work to wake it up, etc.
2141 // expects the thread lock to be held
2142 static void deliver_signal(struct thread
*t
, int signal
)
2144 // dprintf("deliver_signal: thread %p (%d), signal %d\n", t, t->id, signal);
2147 t
->pending_signals
|= SIG_KILL
;
2149 case THREAD_STATE_SUSPENDED
:
2150 t
->state
= THREAD_STATE_READY
;
2151 t
->next_state
= THREAD_STATE_READY
;
2153 thread_enqueue_run_q(t
);
2155 case THREAD_STATE_WAITING
:
2156 sem_interrupt_thread(t
);
2163 t
->pending_signals
|= signal
;
2167 // expects the thread lock to be held
2168 static void _check_for_thread_sigs(struct thread
*t
, int state
)
2170 if(t
->pending_signals
== SIG_NONE
)
2173 if(t
->pending_signals
& SIG_KILL
) {
2174 t
->pending_signals
&= ~SIG_KILL
;
2176 RELEASE_THREAD_LOCK();
2177 int_restore_interrupts(state
);
2179 // never gets to here
2181 if(t
->pending_signals
& SIG_SUSPEND
) {
2182 t
->pending_signals
&= ~SIG_SUSPEND
;
2183 t
->next_state
= THREAD_STATE_SUSPENDED
;
2184 // XXX will probably want to delay this
2189 // called in the int handler code when a thread enters the kernel for any reason
2190 void thread_atkernel_entry(void)
2196 // dprintf("thread_atkernel_entry: entry thread 0x%x\n", t->id);
2198 t
= thread_get_current_thread();
2200 state
= int_disable_interrupts();
2203 now
= system_time();
2204 t
->user_time
+= now
- t
->last_time
;
2209 t
->in_kernel
= true;
2211 _check_for_thread_sigs(t
, state
);
2213 RELEASE_THREAD_LOCK();
2214 int_restore_interrupts(state
);
2217 // called when a thread exits kernel space to user space
2218 void thread_atkernel_exit(void)
2224 // dprintf("thread_atkernel_exit: entry\n");
2226 t
= thread_get_current_thread();
2228 state
= int_disable_interrupts();
2231 _check_for_thread_sigs(t
, state
);
2233 t
->in_kernel
= false;
2235 RELEASE_THREAD_LOCK();
2237 // track kernel time
2238 now
= system_time();
2239 t
->kernel_time
+= now
- t
->last_time
;
2242 int_restore_interrupts(state
);
2245 int user_getrlimit(int resource
, struct rlimit
* urlp
)
2251 return ERR_INVALID_ARGS
;
2253 if((addr
)urlp
>= KERNEL_BASE
&& (addr
)urlp
<= KERNEL_TOP
) {
2254 return ERR_VM_BAD_USER_MEMORY
;
2257 ret
= getrlimit(resource
, &rl
);
2260 ret
= user_memcpy(urlp
, &rl
, sizeof(struct rlimit
));
2270 int getrlimit(int resource
, struct rlimit
* rlp
)
2278 return vfs_getrlimit(resource
, rlp
);
2287 int user_setrlimit(int resource
, const struct rlimit
* urlp
)
2293 return ERR_INVALID_ARGS
;
2295 if((addr
)urlp
>= KERNEL_BASE
&& (addr
)urlp
<= KERNEL_TOP
) {
2296 return ERR_VM_BAD_USER_MEMORY
;
2299 err
= user_memcpy(&rl
, urlp
, sizeof(struct rlimit
));
2304 return setrlimit(resource
, &rl
);
2307 int setrlimit(int resource
, const struct rlimit
* rlp
)
2315 return vfs_setrlimit(resource
, rlp
);