2 ** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
3 ** Distributed under the terms of the NewOS License.
5 #include <kernel/kernel.h>
6 #include <kernel/debug.h>
7 #include <kernel/console.h>
8 #include <kernel/thread.h>
9 #include <kernel/arch/thread.h>
10 #include <kernel/khash.h>
11 #include <kernel/int.h>
12 #include <kernel/smp.h>
13 #include <kernel/timer.h>
14 #include <kernel/cpu.h>
15 #include <kernel/arch/cpu.h>
16 #include <kernel/arch/int.h>
17 #include <kernel/arch/vm.h>
18 #include <kernel/sem.h>
19 #include <kernel/port.h>
20 #include <kernel/vfs.h>
21 #include <kernel/elf.h>
22 #include <kernel/heap.h>
23 #include <newos/user_runtime.h>
24 #include <newos/errors.h>
25 #include <boot/stage2.h>
29 #include <sys/resource.h>
45 static struct proc
*create_proc_struct(const char *name
, bool kernel
);
46 static int proc_struct_compare(void *_p
, const void *_key
);
47 static unsigned int proc_struct_hash(void *_p
, const void *_key
, unsigned int range
);
50 spinlock_t thread_spinlock
= 0;
53 static void *proc_hash
= NULL
;
54 static struct proc
*kernel_proc
= NULL
;
55 static proc_id next_proc_id
= 0;
56 static spinlock_t proc_spinlock
= 0;
57 // NOTE: PROC lock can be held over a THREAD lock acquisition,
58 // but not the other way (to avoid deadlock)
59 #define GRAB_PROC_LOCK() acquire_spinlock(&proc_spinlock)
60 #define RELEASE_PROC_LOCK() release_spinlock(&proc_spinlock)
63 static struct thread
*idle_threads
[MAX_BOOT_CPUS
];
64 static void *thread_hash
= NULL
;
65 static thread_id next_thread_id
= 0;
67 static sem_id snooze_sem
= -1;
70 // used temporarily as a thread cleans itself up
76 static struct death_stack
*death_stacks
;
77 static unsigned int num_death_stacks
;
78 static unsigned int volatile death_stack_bitmap
;
79 static sem_id death_stack_sem
;
82 static struct thread_queue run_q
[THREAD_NUM_PRIORITY_LEVELS
] = { { NULL
, NULL
}, };
83 static struct thread_queue dead_q
;
85 static int _rand(void);
86 static void thread_entry(void);
87 static struct thread
*thread_get_thread_struct_locked(thread_id id
);
88 static struct proc
*proc_get_proc_struct(proc_id id
);
89 static struct proc
*proc_get_proc_struct_locked(proc_id id
);
90 static void thread_kthread_exit(void);
91 static void deliver_signal(struct thread
*t
, int signal
);
93 // insert a thread onto the tail of a queue
94 void thread_enqueue(struct thread
*t
, struct thread_queue
*q
)
106 struct thread
*thread_lookat_queue(struct thread_queue
*q
)
111 struct thread
*thread_dequeue(struct thread_queue
*q
)
124 struct thread
*thread_dequeue_id(struct thread_queue
*q
, thread_id thr_id
)
127 struct thread
*last
= NULL
;
131 if(t
->id
== thr_id
) {
135 last
->q_next
= t
->q_next
;
147 struct thread
*thread_lookat_run_q(int priority
)
149 return thread_lookat_queue(&run_q
[priority
]);
152 void thread_enqueue_run_q(struct thread
*t
)
154 // these shouldn't exist
155 if(t
->priority
> THREAD_MAX_PRIORITY
)
156 t
->priority
= THREAD_MAX_PRIORITY
;
160 thread_enqueue(t
, &run_q
[t
->priority
]);
163 struct thread
*thread_dequeue_run_q(int priority
)
165 return thread_dequeue(&run_q
[priority
]);
168 static void insert_thread_into_proc(struct proc
*p
, struct thread
*t
)
170 t
->proc_next
= p
->thread_list
;
173 if(p
->num_threads
== 1) {
174 // this was the first thread
180 static void remove_thread_from_proc(struct proc
*p
, struct thread
*t
)
182 struct thread
*temp
, *last
= NULL
;
184 for(temp
= p
->thread_list
; temp
!= NULL
; temp
= temp
->proc_next
) {
187 p
->thread_list
= temp
->proc_next
;
189 last
->proc_next
= temp
->proc_next
;
198 static int thread_struct_compare(void *_t
, const void *_key
)
200 struct thread
*t
= _t
;
201 const struct thread_key
*key
= _key
;
203 if(t
->id
== key
->id
) return 0;
207 // Frees the argument list
209 // args argument list.
210 // args number of arguments
212 static void free_arg_list(char **args
, int argc
)
217 for(cnt
= 0; cnt
< argc
; cnt
++){
225 // Copy argument list from userspace to kernel space
227 // args userspace parameters
228 // argc number of parameters
229 // kargs usespace parameters
230 // return < 0 on error and **kargs = NULL
232 static int user_copy_arg_list(char **args
, int argc
, char ***kargs
)
238 char buf
[SYS_THREAD_ARG_LENGTH_MAX
];
242 if((addr
)args
>= KERNEL_BASE
&& (addr
)args
<= KERNEL_TOP
)
243 return ERR_VM_BAD_USER_MEMORY
;
245 largs
= kmalloc((argc
+ 1) * sizeof(char *));
247 return ERR_NO_MEMORY
;
250 // scan all parameters and copy to kernel space
252 for(cnt
= 0; cnt
< argc
; cnt
++) {
253 err
= user_memcpy(&source
, &(args
[cnt
]), sizeof(char *));
257 if((addr
)source
>= KERNEL_BASE
&& (addr
)source
<= KERNEL_TOP
){
258 err
= ERR_VM_BAD_USER_MEMORY
;
262 err
= user_strncpy(buf
,source
, SYS_THREAD_ARG_LENGTH_MAX
- 1);
265 buf
[SYS_THREAD_ARG_LENGTH_MAX
- 1] = 0;
267 largs
[cnt
] = kstrdup(buf
);
268 if(largs
[cnt
] == NULL
){
280 free_arg_list(largs
,cnt
);
281 dprintf("user_copy_arg_list failed %d \n",err
);
285 static unsigned int thread_struct_hash(void *_t
, const void *_key
, unsigned int range
)
287 struct thread
*t
= _t
;
288 const struct thread_key
*key
= _key
;
291 return (t
->id
% range
);
293 return (key
->id
% range
);
296 static struct thread
*create_thread_struct(const char *name
)
301 state
= int_disable_interrupts();
303 t
= thread_dequeue(&dead_q
);
304 RELEASE_THREAD_LOCK();
305 int_restore_interrupts(state
);
308 t
= (struct thread
*)kmalloc(sizeof(struct thread
));
313 strncpy(&t
->name
[0], name
, SYS_MAX_OS_NAME_LEN
-1);
314 t
->name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
316 t
->id
= atomic_add(&next_thread_id
, 1);
319 t
->sem_blocking
= -1;
320 t
->fault_handler
= 0;
321 t
->kernel_stack_region_id
= -1;
322 t
->kernel_stack_base
= 0;
323 t
->user_stack_region_id
= -1;
324 t
->user_stack_base
= 0;
329 t
->pending_signals
= SIG_NONE
;
337 sprintf(temp
, "thread_0x%x_retcode_sem", t
->id
);
338 t
->return_code_sem
= sem_create(0, temp
);
339 if(t
->return_code_sem
< 0)
343 if(arch_thread_init_thread_struct(t
) < 0)
349 sem_delete_etc(t
->return_code_sem
, -1);
356 static void delete_thread_struct(struct thread
*t
)
358 if(t
->return_code_sem
>= 0)
359 sem_delete_etc(t
->return_code_sem
, -1);
363 static int _create_user_thread_kentry(void)
367 t
= thread_get_current_thread();
369 // a signal may have been delivered here
370 thread_atkernel_exit();
372 // jump to the entry point in user space
373 arch_thread_enter_uspace((addr
)t
->entry
, t
->args
, t
->user_stack_base
+ STACK_SIZE
);
379 static int _create_kernel_thread_kentry(void)
381 int (*func
)(void *args
);
384 t
= thread_get_current_thread();
386 // call the entry function with the appropriate args
387 func
= (void *)t
->entry
;
389 return func(t
->args
);
392 static thread_id
_create_thread(const char *name
, proc_id pid
, addr entry
, void *args
, bool kernel
)
400 t
= create_thread_struct(name
);
402 return ERR_NO_MEMORY
;
404 t
->priority
= THREAD_MEDIUM_PRIORITY
;
405 t
->state
= THREAD_STATE_BIRTH
;
406 t
->next_state
= THREAD_STATE_SUSPENDED
;
408 state
= int_disable_interrupts();
411 // insert into global list
412 hash_insert(thread_hash
, t
);
413 RELEASE_THREAD_LOCK();
416 // look at the proc, make sure it's not being deleted
417 p
= proc_get_proc_struct_locked(pid
);
418 if(p
!= NULL
&& p
->state
!= PROC_STATE_DEATH
) {
419 insert_thread_into_proc(p
, t
);
426 hash_remove(thread_hash
, t
);
427 RELEASE_THREAD_LOCK();
429 int_restore_interrupts(state
);
431 delete_thread_struct(t
);
432 return ERR_TASK_PROC_DELETED
;
435 sprintf(stack_name
, "%s_kstack", name
);
436 t
->kernel_stack_region_id
= vm_create_anonymous_region(vm_get_kernel_aspace_id(), stack_name
,
437 (void **)&t
->kernel_stack_base
, REGION_ADDR_ANY_ADDRESS
, KSTACK_SIZE
,
438 REGION_WIRING_WIRED
, LOCK_RW
|LOCK_KERNEL
);
439 if(t
->kernel_stack_region_id
< 0)
440 panic("_create_thread: error creating kernel stack!\n");
446 // this sets up an initial kthread stack that runs the entry
447 arch_thread_initialize_kthread_stack(t
, &_create_kernel_thread_kentry
, &thread_entry
, &thread_kthread_exit
);
450 // XXX make this better. For now just keep trying to create a stack
451 // until we find a spot.
452 t
->user_stack_base
= (USER_STACK_REGION
- STACK_SIZE
) + USER_STACK_REGION_SIZE
;
453 while(t
->user_stack_base
> USER_STACK_REGION
) {
454 sprintf(stack_name
, "%s_stack%d", p
->name
, t
->id
);
455 t
->user_stack_region_id
= vm_create_anonymous_region(p
->aspace_id
, stack_name
,
456 (void **)&t
->user_stack_base
,
457 REGION_ADDR_ANY_ADDRESS
, STACK_SIZE
, REGION_WIRING_LAZY
, LOCK_RW
);
458 if(t
->user_stack_region_id
< 0) {
459 t
->user_stack_base
-= STACK_SIZE
;
461 // we created a region
465 if(t
->user_stack_region_id
< 0)
466 panic("_create_thread: unable to create user stack!\n");
468 // copy the user entry over to the args field in the thread struct
469 // the function this will call will immediately switch the thread into
471 arch_thread_initialize_kthread_stack(t
, &_create_user_thread_kentry
, &thread_entry
, &thread_kthread_exit
);
474 t
->state
= THREAD_STATE_SUSPENDED
;
479 thread_id
user_thread_create_user_thread(char *uname
, proc_id pid
, addr entry
, void *args
)
481 char name
[SYS_MAX_OS_NAME_LEN
];
484 if((addr
)uname
>= KERNEL_BASE
&& (addr
)uname
<= KERNEL_TOP
)
485 return ERR_VM_BAD_USER_MEMORY
;
486 if(entry
>= KERNEL_BASE
&& entry
<= KERNEL_TOP
)
487 return ERR_VM_BAD_USER_MEMORY
;
489 rc
= user_strncpy(name
, uname
, SYS_MAX_OS_NAME_LEN
-1);
492 name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
494 return thread_create_user_thread(name
, pid
, entry
, args
);
497 thread_id
thread_create_user_thread(char *name
, proc_id pid
, addr entry
, void *args
)
499 return _create_thread(name
, pid
, entry
, args
, false);
502 thread_id
thread_create_kernel_thread(const char *name
, int (*func
)(void *), void *args
)
504 return _create_thread(name
, proc_get_kernel_proc()->id
, (addr
)func
, args
, true);
507 static thread_id
thread_create_kernel_thread_etc(const char *name
, int (*func
)(void *), void *args
, struct proc
*p
)
509 return _create_thread(name
, p
->id
, (addr
)func
, args
, true);
512 int thread_suspend_thread(thread_id id
)
517 bool global_resched
= false;
519 state
= int_disable_interrupts();
522 t
= thread_get_current_thread();
524 t
= thread_get_thread_struct_locked(id
);
528 if(t
->proc
== kernel_proc
) {
530 retval
= ERR_NOT_ALLOWED
;
531 } else if(t
->in_kernel
== true) {
532 t
->pending_signals
|= SIG_SUSPEND
;
534 t
->next_state
= THREAD_STATE_SUSPENDED
;
535 global_resched
= true;
539 retval
= ERR_INVALID_HANDLE
;
542 RELEASE_THREAD_LOCK();
543 int_restore_interrupts(state
);
546 smp_send_broadcast_ici(SMP_MSG_RESCHEDULE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_SYNC
);
552 int thread_resume_thread(thread_id id
)
558 state
= int_disable_interrupts();
561 t
= thread_get_thread_struct_locked(id
);
562 if(t
!= NULL
&& t
->state
== THREAD_STATE_SUSPENDED
) {
563 t
->state
= THREAD_STATE_READY
;
564 t
->next_state
= THREAD_STATE_READY
;
566 thread_enqueue_run_q(t
);
569 retval
= ERR_INVALID_HANDLE
;
572 RELEASE_THREAD_LOCK();
573 int_restore_interrupts(state
);
578 int thread_set_priority(thread_id id
, int priority
)
583 // make sure the passed in priority is within bounds
584 if(priority
> THREAD_MAX_PRIORITY
)
585 priority
= THREAD_MAX_PRIORITY
;
586 if(priority
< THREAD_MIN_PRIORITY
)
587 priority
= THREAD_MIN_PRIORITY
;
589 t
= thread_get_current_thread();
591 // it's ourself, so we know we aren't in a run queue, and we can manipulate
592 // our structure directly
593 t
->priority
= priority
;
596 int state
= int_disable_interrupts();
599 t
= thread_get_thread_struct_locked(id
);
601 if(t
->state
== THREAD_STATE_READY
&& t
->priority
!= priority
) {
602 // this thread is in a ready queue right now, so it needs to be reinserted
603 thread_dequeue_id(&run_q
[t
->priority
], t
->id
);
604 t
->priority
= priority
;
605 thread_enqueue_run_q(t
);
607 t
->priority
= priority
;
611 retval
= ERR_INVALID_HANDLE
;
614 RELEASE_THREAD_LOCK();
615 int_restore_interrupts(state
);
621 static void _dump_proc_info(struct proc
*p
)
623 dprintf("PROC: %p\n", p
);
624 dprintf("id: 0x%x\n", p
->id
);
625 dprintf("name: '%s'\n", p
->name
);
626 dprintf("next: %p\n", p
->next
);
627 dprintf("num_threads: %d\n", p
->num_threads
);
628 dprintf("state: %d\n", p
->state
);
629 dprintf("pending_signals: 0x%x\n", p
->pending_signals
);
630 dprintf("ioctx: %p\n", p
->ioctx
);
631 dprintf("aspace_id: 0x%x\n", p
->aspace_id
);
632 dprintf("aspace: %p\n", p
->aspace
);
633 dprintf("kaspace: %p\n", p
->kaspace
);
634 dprintf("main_thread: %p\n", p
->main_thread
);
635 dprintf("thread_list: %p\n", p
->thread_list
);
638 static void dump_proc_info(int argc
, char **argv
)
643 struct hash_iterator i
;
646 dprintf("proc: not enough arguments\n");
650 // if the argument looks like a hex number, treat it as such
651 if(strlen(argv
[1]) > 2 && argv
[1][0] == '0' && argv
[1][1] == 'x') {
652 num
= atoul(argv
[1]);
653 if(num
> vm_get_kernel_aspace()->virtual_map
.base
) {
655 _dump_proc_info((struct proc
*)num
);
662 // walk through the thread list, trying to match name or id
663 hash_open(proc_hash
, &i
);
664 while((p
= hash_next(proc_hash
, &i
)) != NULL
) {
665 if((p
->name
&& strcmp(argv
[1], p
->name
) == 0) || p
->id
== id
) {
670 hash_close(proc_hash
, &i
, false);
674 static const char *state_to_text(int state
)
677 case THREAD_STATE_READY
:
679 case THREAD_STATE_RUNNING
:
681 case THREAD_STATE_WAITING
:
683 case THREAD_STATE_SUSPENDED
:
685 case THREAD_STATE_FREE_ON_RESCHED
:
687 case THREAD_STATE_BIRTH
:
694 static struct thread
*last_thread_dumped
= NULL
;
696 static void _dump_thread_info(struct thread
*t
)
698 dprintf("THREAD: %p\n", t
);
699 dprintf("id: 0x%x\n", t
->id
);
700 dprintf("name: '%s'\n", t
->name
);
701 dprintf("all_next: %p\nproc_next: %p\nq_next: %p\n",
702 t
->all_next
, t
->proc_next
, t
->q_next
);
703 dprintf("priority: 0x%x\n", t
->priority
);
704 dprintf("state: %s\n", state_to_text(t
->state
));
705 dprintf("next_state: %s\n", state_to_text(t
->next_state
));
706 dprintf("cpu: %p ", t
->cpu
);
708 dprintf("(%d)\n", t
->cpu
->info
.cpu_num
);
711 dprintf("pending_signals: 0x%x\n", t
->pending_signals
);
712 dprintf("in_kernel: %d\n", t
->in_kernel
);
713 dprintf("sem_blocking:0x%x\n", t
->sem_blocking
);
714 dprintf("sem_count: 0x%x\n", t
->sem_count
);
715 dprintf("sem_deleted_retcode: 0x%x\n", t
->sem_deleted_retcode
);
716 dprintf("sem_errcode: 0x%x\n", t
->sem_errcode
);
717 dprintf("sem_flags: 0x%x\n", t
->sem_flags
);
718 dprintf("fault_handler: 0x%lx\n", t
->fault_handler
);
719 dprintf("args: %p\n", t
->args
);
720 dprintf("entry: 0x%lx\n", t
->entry
);
721 dprintf("proc: %p\n", t
->proc
);
722 dprintf("return_code_sem: 0x%x\n", t
->return_code_sem
);
723 dprintf("kernel_stack_region_id: 0x%x\n", t
->kernel_stack_region_id
);
724 dprintf("kernel_stack_base: 0x%lx\n", t
->kernel_stack_base
);
725 dprintf("user_stack_region_id: 0x%x\n", t
->user_stack_region_id
);
726 dprintf("user_stack_base: 0x%lx\n", t
->user_stack_base
);
727 dprintf("kernel_time: %Ld\n", t
->kernel_time
);
728 dprintf("user_time: %Ld\n", t
->user_time
);
729 dprintf("architecture dependant section:\n");
730 arch_thread_dump_info(&t
->arch_info
);
732 last_thread_dumped
= t
;
735 static void dump_thread_info(int argc
, char **argv
)
740 struct hash_iterator i
;
743 dprintf("thread: not enough arguments\n");
747 // if the argument looks like a hex number, treat it as such
748 if(strlen(argv
[1]) > 2 && argv
[1][0] == '0' && argv
[1][1] == 'x') {
749 num
= atoul(argv
[1]);
750 if(num
> vm_get_kernel_aspace()->virtual_map
.base
) {
752 _dump_thread_info((struct thread
*)num
);
759 // walk through the thread list, trying to match name or id
760 hash_open(thread_hash
, &i
);
761 while((t
= hash_next(thread_hash
, &i
)) != NULL
) {
762 if((t
->name
&& strcmp(argv
[1], t
->name
) == 0) || t
->id
== id
) {
763 _dump_thread_info(t
);
767 hash_close(thread_hash
, &i
, false);
770 static void dump_thread_list(int argc
, char **argv
)
773 struct hash_iterator i
;
775 hash_open(thread_hash
, &i
);
776 while((t
= hash_next(thread_hash
, &i
)) != NULL
) {
779 dprintf("\t%32s", t
->name
);
781 dprintf("\t%32s", "<NULL>");
782 dprintf("\t0x%x", t
->id
);
783 dprintf("\t%16s", state_to_text(t
->state
));
785 dprintf("\t%d", t
->cpu
->info
.cpu_num
);
788 dprintf("\t0x%lx\n", t
->kernel_stack_base
);
790 hash_close(thread_hash
, &i
, false);
793 static void dump_next_thread_in_q(int argc
, char **argv
)
795 struct thread
*t
= last_thread_dumped
;
798 dprintf("no thread previously dumped. Examine a thread first.\n");
802 dprintf("next thread in queue after thread @ %p\n", t
);
803 if(t
->q_next
!= NULL
) {
804 _dump_thread_info(t
->q_next
);
810 static void dump_next_thread_in_all_list(int argc
, char **argv
)
812 struct thread
*t
= last_thread_dumped
;
815 dprintf("no thread previously dumped. Examine a thread first.\n");
819 dprintf("next thread in global list after thread @ %p\n", t
);
820 if(t
->all_next
!= NULL
) {
821 _dump_thread_info(t
->all_next
);
827 static void dump_next_thread_in_proc(int argc
, char **argv
)
829 struct thread
*t
= last_thread_dumped
;
832 dprintf("no thread previously dumped. Examine a thread first.\n");
836 dprintf("next thread in proc after thread @ %p\n", t
);
837 if(t
->proc_next
!= NULL
) {
838 _dump_thread_info(t
->proc_next
);
844 static int get_death_stack(void)
850 sem_acquire(death_stack_sem
, 1);
852 // grap the thread lock, find a free spot and release
853 state
= int_disable_interrupts();
855 bit
= death_stack_bitmap
;
856 bit
= (~bit
)&~((~bit
)-1);
857 death_stack_bitmap
|= bit
;
858 RELEASE_THREAD_LOCK();
863 panic("get_death_stack: couldn't find free stack!\n");
866 panic("get_death_stack: impossible bitmap result!\n");
877 // dprintf("get_death_stack: returning 0x%lx\n", death_stacks[i].address);
882 static void put_death_stack_and_reschedule(unsigned int index
)
884 // dprintf("put_death_stack...: passed %d\n", index);
886 if(index
>= num_death_stacks
)
887 panic("put_death_stack: passed invalid stack index %d\n", index
);
889 if(!(death_stack_bitmap
& (1 << index
)))
890 panic("put_death_stack: passed invalid stack index %d\n", index
);
892 int_disable_interrupts();
895 death_stack_bitmap
&= ~(1 << index
);
897 sem_release_etc(death_stack_sem
, 1, SEM_FLAG_NO_RESCHED
);
902 int thread_init(kernel_args
*ka
)
907 // dprintf("thread_init: entry\n");
909 // create the process hash table
910 proc_hash
= hash_init(15, (addr
)&kernel_proc
->next
- (addr
)kernel_proc
,
911 &proc_struct_compare
, &proc_struct_hash
);
913 // create the kernel process
914 kernel_proc
= create_proc_struct("kernel_proc", true);
915 if(kernel_proc
== NULL
)
916 panic("could not create kernel proc!\n");
917 kernel_proc
->state
= PROC_STATE_NORMAL
;
919 kernel_proc
->ioctx
= vfs_new_ioctx(NULL
);
920 if(kernel_proc
->ioctx
== NULL
)
921 panic("could not create ioctx for kernel proc!\n");
923 // stick it in the process hash
924 hash_insert(proc_hash
, kernel_proc
);
926 // create the thread hash table
927 thread_hash
= hash_init(15, (addr
)&t
->all_next
- (addr
)t
,
928 &thread_struct_compare
, &thread_struct_hash
);
930 // zero out the run queues
931 memset(run_q
, 0, sizeof(run_q
));
933 // zero out the dead thread structure q
934 memset(&dead_q
, 0, sizeof(dead_q
));
936 // allocate a snooze sem
937 snooze_sem
= sem_create(0, "snooze sem");
939 panic("error creating snooze sem\n");
943 // create an idle thread for each cpu
944 for(i
=0; i
<ka
->num_cpus
; i
++) {
948 sprintf(temp
, "idle_thread%d", i
);
949 t
= create_thread_struct(temp
);
951 panic("error creating idle thread struct\n");
952 return ERR_NO_MEMORY
;
954 t
->proc
= proc_get_kernel_proc();
955 t
->priority
= THREAD_IDLE_PRIORITY
;
956 t
->state
= THREAD_STATE_RUNNING
;
957 t
->next_state
= THREAD_STATE_READY
;
958 sprintf(temp
, "idle_thread%d_kstack", i
);
959 t
->kernel_stack_region_id
= vm_find_region_by_name(vm_get_kernel_aspace_id(), temp
);
960 region
= vm_get_region_by_id(t
->kernel_stack_region_id
);
962 panic("error finding idle kstack region\n");
964 t
->kernel_stack_base
= region
->base
;
965 vm_put_region(region
);
966 hash_insert(thread_hash
, t
);
967 insert_thread_into_proc(t
->proc
, t
);
970 arch_thread_set_current_thread(t
);
974 // create a set of death stacks
975 num_death_stacks
= smp_get_num_cpus();
976 if(num_death_stacks
> 8*sizeof(death_stack_bitmap
)) {
978 * clamp values for really beefy machines
980 num_death_stacks
= 8*sizeof(death_stack_bitmap
);
982 death_stack_bitmap
= 0;
983 death_stacks
= (struct death_stack
*)kmalloc(num_death_stacks
* sizeof(struct death_stack
));
984 if(death_stacks
== NULL
) {
985 panic("error creating death stacks\n");
986 return ERR_NO_MEMORY
;
991 for(i
=0; i
<num_death_stacks
; i
++) {
992 sprintf(temp
, "death_stack%d", i
);
993 death_stacks
[i
].rid
= vm_create_anonymous_region(vm_get_kernel_aspace_id(), temp
,
994 (void **)&death_stacks
[i
].address
,
995 REGION_ADDR_ANY_ADDRESS
, KSTACK_SIZE
, REGION_WIRING_WIRED
, LOCK_RW
|LOCK_KERNEL
);
996 if(death_stacks
[i
].rid
< 0) {
997 panic("error creating death stacks\n");
998 return death_stacks
[i
].rid
;
1000 death_stacks
[i
].in_use
= false;
1003 death_stack_sem
= sem_create(num_death_stacks
, "death_stack_noavail_sem");
1005 // set up some debugger commands
1006 dbg_add_command(dump_thread_list
, "threads", "list all threads");
1007 dbg_add_command(dump_thread_info
, "thread", "list info about a particular thread");
1008 dbg_add_command(dump_next_thread_in_q
, "next_q", "dump the next thread in the queue of last thread viewed");
1009 dbg_add_command(dump_next_thread_in_all_list
, "next_all", "dump the next thread in the global list of the last thread viewed");
1010 dbg_add_command(dump_next_thread_in_proc
, "next_proc", "dump the next thread in the process of the last thread viewed");
1011 dbg_add_command(dump_proc_info
, "proc", "list info about a particular process");
1016 int thread_init_percpu(int cpu_num
)
1018 arch_thread_set_current_thread(idle_threads
[cpu_num
]);
1022 // this starts the scheduler. Must be run under the context of
1023 // the initial idle thread.
1024 void thread_start_threading(void)
1028 // XXX may not be the best place for this
1029 // invalidate all of the other processors' TLB caches
1030 state
= int_disable_interrupts();
1031 arch_cpu_global_TLB_invalidate();
1032 smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_SYNC
);
1033 int_restore_interrupts(state
);
1035 // start the other processors
1036 smp_send_broadcast_ici(SMP_MSG_RESCHEDULE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_ASYNC
);
1038 state
= int_disable_interrupts();
1043 RELEASE_THREAD_LOCK();
1044 int_restore_interrupts(state
);
1047 int user_thread_snooze(bigtime_t time
)
1049 thread_snooze(time
);
1053 void thread_snooze(bigtime_t time
)
1055 sem_acquire_etc(snooze_sem
, 1, SEM_FLAG_TIMEOUT
, time
, NULL
);
1058 // this function gets run by a new thread before anything else
static void thread_entry(void)
{
	// simulates the thread spinlock release that would occur if the thread had been
	// rescheded from. The resched didn't happen because the thread is new.
	RELEASE_THREAD_LOCK();
	int_enable_interrupts(); // this essentially simulates a return-from-interrupt
}
1067 // used to pass messages between thread_exit and thread_exit2
1068 struct thread_exit_args
{
1070 region_id old_kernel_stack
;
1072 unsigned int death_stack
;
1075 static void thread_exit2(void *_args
)
1077 struct thread_exit_args args
;
1080 // copy the arguments over, since the source is probably on the kernel stack we're about to delete
1081 memcpy(&args
, _args
, sizeof(struct thread_exit_args
));
1083 // restore the interrupts
1084 int_restore_interrupts(args
.int_state
);
1086 // dprintf("thread_exit2, running on death stack 0x%lx\n", args.t->kernel_stack_base);
1088 // delete the old kernel stack region
1089 // dprintf("thread_exit2: deleting old kernel stack id 0x%x for thread 0x%x\n", args.old_kernel_stack, args.t->id);
1090 vm_delete_region(vm_get_kernel_aspace_id(), args
.old_kernel_stack
);
1092 // dprintf("thread_exit2: removing thread 0x%x from global lists\n", args.t->id);
1094 // remove this thread from all of the global lists
1095 int_disable_interrupts();
1097 remove_thread_from_proc(kernel_proc
, args
.t
);
1098 RELEASE_PROC_LOCK();
1100 hash_remove(thread_hash
, args
.t
);
1101 RELEASE_THREAD_LOCK();
1103 // dprintf("thread_exit2: done removing thread from lists\n");
1105 // set the next state to be gone. Will return the thread structure to a ready pool upon reschedule
1106 args
.t
->next_state
= THREAD_STATE_FREE_ON_RESCHED
;
1108 // return the death stack and reschedule one last time
1109 put_death_stack_and_reschedule(args
.death_stack
);
1110 // never get to here
1111 panic("thread_exit2: made it where it shouldn't have!\n");
1114 void thread_exit(int retcode
)
1117 struct thread
*t
= thread_get_current_thread();
1118 struct proc
*p
= t
->proc
;
1119 bool delete_proc
= false;
1120 unsigned int death_stack
;
1122 dprintf("thread 0x%x exiting w/return code 0x%x\n", t
->id
, retcode
);
1124 // boost our priority to get this over with
1125 thread_set_priority(t
->id
, THREAD_HIGH_PRIORITY
);
1127 // delete the user stack region first
1128 if(p
->aspace_id
>= 0 && t
->user_stack_region_id
>= 0) {
1129 region_id rid
= t
->user_stack_region_id
;
1130 t
->user_stack_region_id
= -1;
1131 vm_delete_region(p
->aspace_id
, rid
);
1134 if(p
!= kernel_proc
) {
1135 // remove this thread from the current process and add it to the kernel
1136 // put the thread into the kernel proc until it dies
1137 state
= int_disable_interrupts();
1139 remove_thread_from_proc(p
, t
);
1140 insert_thread_into_proc(kernel_proc
, t
);
1141 if(p
->main_thread
== t
) {
1142 // this was main thread in this process
1144 hash_remove(proc_hash
, p
);
1145 p
->state
= PROC_STATE_DEATH
;
1147 RELEASE_PROC_LOCK();
1148 // swap address spaces, to make sure we're running on the kernel's pgdir
1149 vm_aspace_swap(kernel_proc
->kaspace
);
1150 int_restore_interrupts(state
);
1152 // dprintf("thread_exit: thread 0x%x now a kernel thread!\n", t->id);
1155 // delete the process
1157 if(p
->num_threads
> 0) {
1158 // there are other threads still in this process,
1159 // cycle through and signal kill on each of the threads
1160 // XXX this can be optimized. There's got to be a better solution.
1161 struct thread
*temp_thread
;
1163 state
= int_disable_interrupts();
1165 // we can safely walk the list because of the lock. no new threads can be created
1166 // because of the PROC_STATE_DEATH flag on the process
1167 temp_thread
= p
->thread_list
;
1168 while(temp_thread
) {
1169 struct thread
*next
= temp_thread
->proc_next
;
1170 thread_kill_thread_nowait(temp_thread
->id
);
1173 RELEASE_PROC_LOCK();
1174 int_restore_interrupts(state
);
1176 // Now wait for all of the threads to die
1177 // XXX block on a semaphore
1178 while((volatile int)p
->num_threads
> 0) {
1179 thread_snooze(10000); // 10 ms
1182 vm_put_aspace(p
->aspace
);
1183 vm_delete_aspace(p
->aspace_id
);
1184 port_delete_owned_ports(p
->id
);
1185 sem_delete_owned_sems(p
->id
);
1186 vfs_free_ioctx(p
->ioctx
);
1190 // delete the sem that others will use to wait on us and get the retcode
1192 sem_id s
= t
->return_code_sem
;
1194 t
->return_code_sem
= -1;
1195 sem_delete_etc(s
, retcode
);
1198 death_stack
= get_death_stack();
1200 struct thread_exit_args args
;
1203 args
.old_kernel_stack
= t
->kernel_stack_region_id
;
1204 args
.death_stack
= death_stack
;
1206 // disable the interrupts. Must remain disabled until the kernel stack pointer can be officially switched
1207 args
.int_state
= int_disable_interrupts();
1209 // set the new kernel stack officially to the death stack, wont be really switched until
1210 // the next function is called. This bookkeeping must be done now before a context switch
1211 // happens, or the processor will interrupt to the old stack
1212 t
->kernel_stack_region_id
= death_stacks
[death_stack
].rid
;
1213 t
->kernel_stack_base
= death_stacks
[death_stack
].address
;
1215 // we will continue in thread_exit2(), on the new stack
1216 arch_thread_switch_kstack_and_call(t
, t
->kernel_stack_base
+ KSTACK_SIZE
, thread_exit2
, &args
);
1219 panic("never can get here\n");
1222 static int _thread_kill_thread(thread_id id
, bool wait_on
)
1228 // dprintf("_thread_kill_thread: id %d, wait_on %d\n", id, wait_on);
1230 state
= int_disable_interrupts();
1233 t
= thread_get_thread_struct_locked(id
);
1235 if(t
->proc
== kernel_proc
) {
1237 rc
= ERR_NOT_ALLOWED
;
1239 deliver_signal(t
, SIG_KILL
);
1241 if(t
->id
== thread_get_current_thread()->id
)
1242 wait_on
= false; // can't wait on ourself
1245 rc
= ERR_INVALID_HANDLE
;
1248 RELEASE_THREAD_LOCK();
1249 int_restore_interrupts(state
);
1254 thread_wait_on_thread(id
, NULL
);
1259 int thread_kill_thread(thread_id id
)
1261 return _thread_kill_thread(id
, true);
1264 int thread_kill_thread_nowait(thread_id id
)
1266 return _thread_kill_thread(id
, false);
1269 static void thread_kthread_exit(void)
1274 int user_thread_wait_on_thread(thread_id id
, int *uretcode
)
1279 if((addr
)uretcode
>= KERNEL_BASE
&& (addr
)uretcode
<= KERNEL_TOP
)
1280 return ERR_VM_BAD_USER_MEMORY
;
1282 rc
= thread_wait_on_thread(id
, &retcode
);
1284 rc2
= user_memcpy(uretcode
, &retcode
, sizeof(retcode
));
1291 int thread_wait_on_thread(thread_id id
, int *retcode
)
1298 state
= int_disable_interrupts();
1301 t
= thread_get_thread_struct_locked(id
);
1303 sem
= t
->return_code_sem
;
1305 sem
= ERR_INVALID_HANDLE
;
1308 RELEASE_THREAD_LOCK();
1309 int_restore_interrupts(state
);
1311 rc
= sem_acquire_etc(sem
, 1, 0, 0, retcode
);
1313 /* This thread died the way it should, dont ripple a non-error up */
1314 if (rc
== ERR_SEM_DELETED
)
1320 int user_proc_wait_on_proc(proc_id id
, int *uretcode
)
1325 if((addr
)uretcode
>= KERNEL_BASE
&& (addr
)uretcode
<= KERNEL_TOP
)
1326 return ERR_VM_BAD_USER_MEMORY
;
1328 rc
= proc_wait_on_proc(id
, &retcode
);
1332 rc2
= user_memcpy(uretcode
, &retcode
, sizeof(retcode
));
1339 int proc_wait_on_proc(proc_id id
, int *retcode
)
1345 state
= int_disable_interrupts();
1347 p
= proc_get_proc_struct_locked(id
);
1348 if(p
&& p
->main_thread
) {
1349 tid
= p
->main_thread
->id
;
1351 tid
= ERR_INVALID_HANDLE
;
1353 RELEASE_PROC_LOCK();
1354 int_restore_interrupts(state
);
1359 return thread_wait_on_thread(tid
, retcode
);
1362 struct thread
*thread_get_thread_struct(thread_id id
)
1367 state
= int_disable_interrupts();
1370 t
= thread_get_thread_struct_locked(id
);
1372 RELEASE_THREAD_LOCK();
1373 int_restore_interrupts(state
);
1378 static struct thread
*thread_get_thread_struct_locked(thread_id id
)
1380 struct thread_key key
;
1384 return hash_lookup(thread_hash
, &key
);
1387 static struct proc
*proc_get_proc_struct(proc_id id
)
1392 state
= int_disable_interrupts();
1395 p
= proc_get_proc_struct_locked(id
);
1397 RELEASE_PROC_LOCK();
1398 int_restore_interrupts(state
);
1403 static struct proc
*proc_get_proc_struct_locked(proc_id id
)
1405 struct proc_key key
;
1409 return hash_lookup(proc_hash
, &key
);
1412 static void thread_context_switch(struct thread
*t_from
, struct thread
*t_to
)
1416 // track kernel time
1417 now
= system_time();
1418 t_from
->kernel_time
+= now
- t_from
->last_time
;
1419 t_to
->last_time
= now
;
1421 t_to
->cpu
= t_from
->cpu
;
1422 arch_thread_set_current_thread(t_to
);
1424 arch_thread_context_switch(t_from
, t_to
);
// simple linear-congruential pseudo-random generator, lazily seeded from
// the system clock on first call. Returns a value in [0, 0x7FFF].
// NOTE(review): the seed-once guard was reconstructed from the static zero
// initializer and the seed assignment — confirm against the original.
static int _rand(void)
{
	static int next = 0;

	if(next == 0)
		next = system_time();

	next = next * 1103515245 + 12345;
	return((next >> 16) & 0x7FFF);
}
1438 static int reschedule_event(void *unused
)
1440 // this function is called as a result of the timer event set by the scheduler
1441 // returning this causes a reschedule on the timer event
1442 thread_get_current_thread()->cpu
->info
.preempted
= 1;
1443 return INT_RESCHEDULE
;
1446 // NOTE: expects thread_spinlock to be held
1447 void thread_resched(void)
1449 struct thread
*next_thread
= NULL
;
1450 int last_thread_pri
= -1;
1451 struct thread
*old_thread
= thread_get_current_thread();
1454 struct timer_event
*quantum_timer
;
1456 // dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), thread_get_current_thread());
1458 switch(old_thread
->next_state
) {
1459 case THREAD_STATE_RUNNING
:
1460 case THREAD_STATE_READY
:
1461 // dprintf("enqueueing thread 0x%x into run q. pri = %d\n", old_thread, old_thread->priority);
1462 thread_enqueue_run_q(old_thread
);
1464 case THREAD_STATE_SUSPENDED
:
1465 dprintf("suspending thread 0x%x\n", old_thread
->id
);
1467 case THREAD_STATE_FREE_ON_RESCHED
:
1468 thread_enqueue(old_thread
, &dead_q
);
1471 // dprintf("not enqueueing thread 0x%x into run q. next_state = %d\n", old_thread, old_thread->next_state);
1474 old_thread
->state
= old_thread
->next_state
;
1476 // search the real-time queue
1477 for(i
= THREAD_MAX_RT_PRIORITY
; i
>= THREAD_MIN_RT_PRIORITY
; i
--) {
1478 next_thread
= thread_dequeue_run_q(i
);
1483 // search the regular queue
1484 for(i
= THREAD_MAX_PRIORITY
; i
> THREAD_IDLE_PRIORITY
; i
--) {
1485 next_thread
= thread_lookat_run_q(i
);
1486 if(next_thread
!= NULL
) {
1487 // skip it sometimes
1488 if(_rand() > 0x3000) {
1489 next_thread
= thread_dequeue_run_q(i
);
1492 last_thread_pri
= i
;
1496 if(next_thread
== NULL
) {
1497 if(last_thread_pri
!= -1) {
1498 next_thread
= thread_dequeue_run_q(last_thread_pri
);
1499 if(next_thread
== NULL
)
1500 panic("next_thread == NULL! last_thread_pri = %d\n", last_thread_pri
);
1502 next_thread
= thread_dequeue_run_q(THREAD_IDLE_PRIORITY
);
1503 if(next_thread
== NULL
)
1504 panic("next_thread == NULL! no idle priorities!\n");
1509 next_thread
->state
= THREAD_STATE_RUNNING
;
1510 next_thread
->next_state
= THREAD_STATE_READY
;
1512 // XXX should only reset the quantum timer if we are switching to a new thread,
1513 // or we got here as a result of a quantum expire.
1515 // XXX calculate quantum
1518 // get the quantum timer for this cpu
1519 quantum_timer
= &old_thread
->cpu
->info
.quantum_timer
;
1520 if(!old_thread
->cpu
->info
.preempted
) {
1521 _local_timer_cancel_event(old_thread
->cpu
->info
.cpu_num
, quantum_timer
);
1523 old_thread
->cpu
->info
.preempted
= 0;
1524 timer_setup_timer(&reschedule_event
, NULL
, quantum_timer
);
1525 timer_set_event(quantum
, TIMER_MODE_ONESHOT
, quantum_timer
);
1527 if(next_thread
!= old_thread
) {
1528 // dprintf("thread_resched: cpu %d switching from thread %d to %d\n",
1529 // smp_get_current_cpu(), old_thread->id, next_thread->id);
1530 thread_context_switch(old_thread
, next_thread
);
1534 static int proc_struct_compare(void *_p
, const void *_key
)
1536 struct proc
*p
= _p
;
1537 const struct proc_key
*key
= _key
;
1539 if(p
->id
== key
->id
) return 0;
1543 static unsigned int proc_struct_hash(void *_p
, const void *_key
, unsigned int range
)
1545 struct proc
*p
= _p
;
1546 const struct proc_key
*key
= _key
;
1549 return (p
->id
% range
);
1551 return (key
->id
% range
);
1554 struct proc
*proc_get_kernel_proc(void)
1559 proc_id
proc_get_kernel_proc_id(void)
1564 return kernel_proc
->id
;
1567 proc_id
proc_get_current_proc_id(void)
1569 return thread_get_current_thread()->proc
->id
;
1572 static struct proc
*create_proc_struct(const char *name
, bool kernel
)
1576 p
= (struct proc
*)kmalloc(sizeof(struct proc
));
1579 p
->id
= atomic_add(&next_proc_id
, 1);
1580 strncpy(&p
->name
[0], name
, SYS_MAX_OS_NAME_LEN
-1);
1581 p
->name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
1586 p
->kaspace
= vm_get_kernel_aspace();
1587 vm_put_aspace(p
->kaspace
);
1588 p
->thread_list
= NULL
;
1589 p
->main_thread
= NULL
;
1590 p
->state
= PROC_STATE_BIRTH
;
1591 p
->pending_signals
= SIG_NONE
;
1593 if(arch_proc_init_proc_struct(p
, kernel
) < 0)
1604 static void delete_proc_struct(struct proc
*p
)
1609 static int get_arguments_data_size(char **args
,int argc
)
1614 for(cnt
= 0; cnt
< argc
; cnt
++)
1615 tot_size
+= strlen(args
[cnt
]) + 1;
1616 tot_size
+= (argc
+ 1) * sizeof(char *);
1618 return tot_size
+ sizeof(struct uspace_prog_args_t
);
1621 static int proc_create_proc2(void *args
)
1626 struct proc_arg
*pargs
= args
;
1629 char ustack_name
[128];
1633 struct uspace_prog_args_t
*uspa
;
1636 t
= thread_get_current_thread();
1639 dprintf("proc_create_proc2: entry thread %d\n", t
->id
);
1641 // create an initial primary stack region
1643 tot_top_size
= STACK_SIZE
+ PAGE_ALIGN(get_arguments_data_size(pargs
->args
,pargs
->argc
));
1644 t
->user_stack_base
= ((USER_STACK_REGION
- tot_top_size
) + USER_STACK_REGION_SIZE
);
1645 sprintf(ustack_name
, "%s_primary_stack", p
->name
);
1646 t
->user_stack_region_id
= vm_create_anonymous_region(p
->aspace_id
, ustack_name
, (void **)&t
->user_stack_base
,
1647 REGION_ADDR_EXACT_ADDRESS
, tot_top_size
, REGION_WIRING_LAZY
, LOCK_RW
);
1648 if(t
->user_stack_region_id
< 0) {
1649 panic("proc_create_proc2: could not create default user stack region\n");
1650 return t
->user_stack_region_id
;
1653 uspa
= (struct uspace_prog_args_t
*)(t
->user_stack_base
+ STACK_SIZE
);
1654 uargs
= (char **)(uspa
+ 1);
1655 udest
= (char *)(uargs
+ pargs
->argc
+ 1);
1656 // dprintf("addr: stack base=0x%x uargs = 0x%x udest=0x%x tot_top_size=%d \n\n",t->user_stack_base,uargs,udest,tot_top_size);
1658 for(cnt
= 0;cnt
< pargs
->argc
;cnt
++){
1660 user_strcpy(udest
, pargs
->args
[cnt
]);
1661 udest
+= strlen(pargs
->args
[cnt
]) + 1;
1665 user_memcpy(uspa
->prog_name
, p
->name
, sizeof(uspa
->prog_name
));
1666 user_memcpy(uspa
->prog_path
, pargs
->path
, sizeof(uspa
->prog_path
));
1672 if(pargs
->args
!= NULL
)
1673 free_arg_list(pargs
->args
,pargs
->argc
);
1676 dprintf("proc_create_proc2: loading elf binary '%s'\n", path
);
1678 err
= elf_load_uspace("/boot/libexec/rld.so", p
, 0, &entry
);
1680 // XXX clean up proc
1688 dprintf("proc_create_proc2: loaded elf. entry = 0x%lx\n", entry
);
1690 p
->state
= PROC_STATE_NORMAL
;
1692 // jump to the entry point in user space
1693 arch_thread_enter_uspace(entry
, uspa
, t
->user_stack_base
+ STACK_SIZE
);
1699 proc_id
proc_create_proc(const char *path
, const char *name
, char **args
, int argc
, int priority
)
1707 struct proc_arg
*pargs
;
1709 dprintf("proc_create_proc: entry '%s', name '%s' args = %p argc = %d\n", path
, name
, args
, argc
);
1711 p
= create_proc_struct(name
, false);
1713 return ERR_NO_MEMORY
;
1717 state
= int_disable_interrupts();
1719 hash_insert(proc_hash
, p
);
1720 RELEASE_PROC_LOCK();
1721 int_restore_interrupts(state
);
1723 // copy the args over
1724 pargs
= kmalloc(sizeof(struct proc_arg
));
1726 err
= ERR_NO_MEMORY
;
1729 pargs
->path
= kstrdup(path
);
1730 if(pargs
->path
== NULL
){
1731 err
= ERR_NO_MEMORY
;
1737 // create a new ioctx for this process
1738 p
->ioctx
= vfs_new_ioctx(thread_get_current_thread()->proc
->ioctx
);
1740 err
= ERR_NO_MEMORY
;
1744 // create an address space for this process
1745 p
->aspace_id
= vm_create_aspace(p
->name
, USER_BASE
, USER_SIZE
, false);
1746 if(p
->aspace_id
< 0) {
1750 p
->aspace
= vm_get_aspace_by_id(p
->aspace_id
);
1752 // create a kernel thread, but under the context of the new process
1753 tid
= thread_create_kernel_thread_etc(name
, proc_create_proc2
, pargs
, p
);
1759 thread_resume_thread(tid
);
1764 vm_put_aspace(p
->aspace
);
1765 vm_delete_aspace(p
->aspace_id
);
1767 vfs_free_ioctx(p
->ioctx
);
1773 // remove the proc structure from the proc hash table and delete the proc structure
1774 state
= int_disable_interrupts();
1776 hash_remove(proc_hash
, p
);
1777 RELEASE_PROC_LOCK();
1778 int_restore_interrupts(state
);
1779 delete_proc_struct(p
);
1784 proc_id
user_proc_create_proc(const char *upath
, const char *uname
, char **args
, int argc
, int priority
)
1786 char path
[SYS_MAX_PATH_LEN
];
1787 char name
[SYS_MAX_OS_NAME_LEN
];
1791 dprintf("user_proc_create_proc : argc=%d \n",argc
);
1793 if((addr
)upath
>= KERNEL_BASE
&& (addr
)upath
<= KERNEL_TOP
)
1794 return ERR_VM_BAD_USER_MEMORY
;
1795 if((addr
)uname
>= KERNEL_BASE
&& (addr
)uname
<= KERNEL_TOP
)
1796 return ERR_VM_BAD_USER_MEMORY
;
1798 rc
= user_copy_arg_list(args
, argc
, &kargs
);
1802 rc
= user_strncpy(path
, upath
, SYS_MAX_PATH_LEN
-1);
1806 path
[SYS_MAX_PATH_LEN
-1] = 0;
1808 rc
= user_strncpy(name
, uname
, SYS_MAX_OS_NAME_LEN
-1);
1812 name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
1814 return proc_create_proc(path
, name
, kargs
, argc
, priority
);
1816 free_arg_list(kargs
,argc
);
1821 // used by PS command and anything else interested in a process list
1822 int user_proc_get_table(struct proc_info
*pbuf
, size_t len
)
1825 struct hash_iterator i
;
1826 struct proc_info pi
;
1829 int max
= (len
/ sizeof(struct proc_info
));
1831 if((addr
)pbuf
>= KERNEL_BASE
&& (addr
)pbuf
<= KERNEL_TOP
)
1832 return ERR_VM_BAD_USER_MEMORY
;
1834 state
= int_disable_interrupts();
1837 hash_open(proc_hash
, &i
);
1838 while(((p
= hash_next(proc_hash
, &i
)) != NULL
) && (count
< max
)) {
1840 strcpy(pi
.name
, p
->name
);
1841 pi
.state
= p
->state
;
1842 pi
.num_threads
= p
->num_threads
;
1844 user_memcpy(pbuf
, &pi
, sizeof(struct proc_info
));
1845 pbuf
=pbuf
+ sizeof(struct proc_info
);
1847 hash_close(proc_hash
, &i
, false);
1849 RELEASE_PROC_LOCK();
1850 int_restore_interrupts(state
);
1855 return ERR_NO_MEMORY
;
1859 int proc_kill_proc(proc_id id
)
1867 state
= int_disable_interrupts();
1870 p
= proc_get_proc_struct_locked(id
);
1872 tid
= p
->main_thread
->id
;
1874 retval
= ERR_INVALID_HANDLE
;
1877 RELEASE_PROC_LOCK();
1878 int_restore_interrupts(state
);
1882 // just kill the main thread in the process. The cleanup code there will
1883 // take care of the process
1884 return thread_kill_thread(tid
);
1887 // sets the pending signal flag on a thread and possibly does some work to wake it up, etc.
1888 // expects the thread lock to be held
1889 static void deliver_signal(struct thread
*t
, int signal
)
1891 // dprintf("deliver_signal: thread %p (%d), signal %d\n", t, t->id, signal);
1894 t
->pending_signals
|= SIG_KILL
;
1896 case THREAD_STATE_SUSPENDED
:
1897 t
->state
= THREAD_STATE_READY
;
1898 t
->next_state
= THREAD_STATE_READY
;
1900 thread_enqueue_run_q(t
);
1902 case THREAD_STATE_WAITING
:
1903 sem_interrupt_thread(t
);
1910 t
->pending_signals
|= signal
;
1914 // expects the thread lock to be held
1915 static void _check_for_thread_sigs(struct thread
*t
, int state
)
1917 if(t
->pending_signals
== SIG_NONE
)
1920 if(t
->pending_signals
& SIG_KILL
) {
1921 t
->pending_signals
&= ~SIG_KILL
;
1923 RELEASE_THREAD_LOCK();
1924 int_restore_interrupts(state
);
1926 // never gets to here
1928 if(t
->pending_signals
& SIG_SUSPEND
) {
1929 t
->pending_signals
&= ~SIG_SUSPEND
;
1930 t
->next_state
= THREAD_STATE_SUSPENDED
;
1931 // XXX will probably want to delay this
1936 // called in the int handler code when a thread enters the kernel for any reason
1937 void thread_atkernel_entry(void)
1943 // dprintf("thread_atkernel_entry: entry thread 0x%x\n", t->id);
1945 t
= thread_get_current_thread();
1947 state
= int_disable_interrupts();
1950 now
= system_time();
1951 t
->user_time
+= now
- t
->last_time
;
1956 t
->in_kernel
= true;
1958 _check_for_thread_sigs(t
, state
);
1960 RELEASE_THREAD_LOCK();
1961 int_restore_interrupts(state
);
1964 // called when a thread exits kernel space to user space
1965 void thread_atkernel_exit(void)
1971 // dprintf("thread_atkernel_exit: entry\n");
1973 t
= thread_get_current_thread();
1975 state
= int_disable_interrupts();
1978 _check_for_thread_sigs(t
, state
);
1980 t
->in_kernel
= false;
1982 RELEASE_THREAD_LOCK();
1984 // track kernel time
1985 now
= system_time();
1986 t
->kernel_time
+= now
- t
->last_time
;
1989 int_restore_interrupts(state
);
1992 int user_getrlimit(int resource
, struct rlimit
* urlp
)
1998 return ERR_INVALID_ARGS
;
2000 if((addr
)urlp
>= KERNEL_BASE
&& (addr
)urlp
<= KERNEL_TOP
) {
2001 return ERR_VM_BAD_USER_MEMORY
;
2004 ret
= getrlimit(resource
, &rl
);
2007 ret
= user_memcpy(urlp
, &rl
, sizeof(struct rlimit
));
2017 int getrlimit(int resource
, struct rlimit
* rlp
)
2025 return vfs_getrlimit(resource
, rlp
);
2034 int user_setrlimit(int resource
, const struct rlimit
* urlp
)
2040 return ERR_INVALID_ARGS
;
2042 if((addr
)urlp
>= KERNEL_BASE
&& (addr
)urlp
<= KERNEL_TOP
) {
2043 return ERR_VM_BAD_USER_MEMORY
;
2046 err
= user_memcpy(&rl
, urlp
, sizeof(struct rlimit
));
2051 return setrlimit(resource
, &rl
);
2054 int setrlimit(int resource
, const struct rlimit
* rlp
)
2062 return vfs_setrlimit(resource
, rlp
);