/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/init.h>
#include <linux/completion.h>

#include <asm/bootinfo.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/stackframe.h>
#include <asm/uaccess.h>

/*
 * We use this if we don't have any better idle routine..
 * (This to kill: kernel/platform.c.)
 */
void default_idle(void)
{
}

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule).
 */
ATTRIB_NORET void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			if (cpu_wait)
				(*cpu_wait)();
		schedule();
	}
}

asmlinkage void ret_from_fork(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_FR|ST0_KSU);
	status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
	status |= KU_USER;
	regs->cp0_status = status;
	current->used_math = 0;
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
	current_thread_info()->addr_limit = USER_DS;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = p->thread_info;
	struct pt_regs *childregs;
	long childksp;

	childksp = (unsigned long)ti + KERNEL_STACK_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	regs->regs[2] = p->pid;

	if (childregs->cp0_status & ST0_CU0) {
		childregs->regs[28] = (unsigned long) ti;
		childregs->regs[29] = childksp;
		ti->addr_limit = KERNEL_DS;
	} else {
		childregs->regs[29] = usp;
		ti->addr_limit = USER_DS;
	}
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1|ST0_KSU);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
	clear_tsk_thread_flag(p, TIF_USEDFPU);
	p->set_child_tid = p->clear_child_tid = NULL;

	return 0;
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	long retval;

	__asm__ __volatile__(
		"	move	$6, $sp		\n"
		"	move	$4, %5		\n"
		"	li	$2, %1		\n"
		"	syscall			\n"
		"	beq	$6, $sp, 1f	\n"
		"	move	$4, %3		\n"
		"	jalr	%4		\n"
		"	move	$4, $2		\n"
		"	li	$2, %2		\n"
		"	syscall			\n"
		"1:	move	%0, $2"
		: "=r" (retval)
		: "i" (__NR_clone), "i" (__NR_exit), "r" (arg), "r" (fn),
		  "r" (flags | CLONE_VM | CLONE_UNTRACED)
		/*
		 * The called subroutine might have destroyed any of the
		 * at, result, argument or temporary registers ...
		 */
		: "$2", "$3", "$4", "$5", "$6", "$7", "$8",
		  "$9","$10","$11","$12","$13","$14","$15","$24","$25","$31");

	return retval;
}

struct mips_frame_info {
	int pc_offset;
	int frame_offset;
};
static struct mips_frame_info schedule_frame;
static struct mips_frame_info schedule_timeout_frame;
static struct mips_frame_info sleep_on_frame;
static struct mips_frame_info sleep_on_timeout_frame;
static struct mips_frame_info wait_for_completion_frame;
static int mips_frame_info_initialized;

static int __init get_frame_info(struct mips_frame_info *info, void *func)
{
	int i;
	union mips_instruction *ip = (union mips_instruction *)func;

	info->pc_offset = -1;
	info->frame_offset = -1;
	for (i = 0; i < 128; i++, ip++) {
		/* if jal, jalr, jr, stop. */
		if (ip->j_format.opcode == jal_op ||
		    (ip->r_format.opcode == spec_op &&
		     (ip->r_format.func == jalr_op ||
		      ip->r_format.func == jr_op)))
			break;

		if (ip->i_format.opcode == sd_op &&
		    ip->i_format.rs == 29) {
			/* sd $ra, offset($sp) */
			if (ip->i_format.rt == 31) {
				if (info->pc_offset != -1)
					break;
				info->pc_offset =
					ip->i_format.simmediate / sizeof(long);
			}
			/* sd $s8, offset($sp) */
			if (ip->i_format.rt == 30) {
				if (info->frame_offset != -1)
					break;
				info->frame_offset =
					ip->i_format.simmediate / sizeof(long);
			}
		}
	}
	if (info->pc_offset == -1 || info->frame_offset == -1) {
		printk("Can't analyze prologue code at %p\n", func);
		info->pc_offset = -1;
		info->frame_offset = -1;
		return -1;
	}

	return 0;
}

void __init frame_info_init(void)
{
	mips_frame_info_initialized =
		!get_frame_info(&schedule_frame, schedule) &&
		!get_frame_info(&schedule_timeout_frame, schedule_timeout) &&
		!get_frame_info(&sleep_on_frame, sleep_on) &&
		!get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) &&
		!get_frame_info(&wait_for_completion_frame, wait_for_completion);
}

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct thread_struct *t)
{
	extern void ret_from_fork(void);

	/* New born processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;

	if (schedule_frame.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_frame.pc_offset];
}

/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)

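/*
 * Background note: get_wchan() reports the address at which a sleeping task
 * is blocked (the wchan value shown by ps).  Since the MIPS ABI provides no
 * reliable frame-pointer chain, it leans on the frame layouts recovered by
 * get_frame_info() above to climb from schedule()'s frame back to whichever
 * sleeping function the task entered.
 */
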
/* get_wchan - a maintenance nightmare ... */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long frame, pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!mips_frame_info_initialized)
		return 0;
	pc = thread_saved_pc(&p->thread);
	if (pc < first_sched || pc >= last_sched)
		goto out;

	if (pc >= (unsigned long) sleep_on_timeout)
		goto schedule_timeout_caller;
	if (pc >= (unsigned long) sleep_on)
		goto schedule_caller;
	if (pc >= (unsigned long) interruptible_sleep_on_timeout)
		goto schedule_timeout_caller;
	if (pc >= (unsigned long) interruptible_sleep_on)
		goto schedule_caller;
	if (pc >= (unsigned long) wait_for_completion)
		goto schedule_caller;
	goto schedule_timeout_caller;

schedule_caller:
	frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
	if (pc >= (unsigned long) sleep_on)
		pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset];
	else
		pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset];
	goto out;

schedule_timeout_caller:
	/* Must be schedule_timeout ... */
	frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];

	/* The schedule_timeout frame ... */
	pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset];

	if (pc >= first_sched && pc < last_sched) {
		/* schedule_timeout called by [interruptible_]sleep_on_timeout */
		frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset];
		pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset];
	}

out:
	if (current->thread.mflags & MF_32BIT_REGS)	/* Kludge for 32-bit ps */
		pc &= 0xffffffffUL;

	return pc;
}