Clear TIF_USEDFPU in copy_thread().
[linux-2.6/linux-mips.git] / arch / mips64 / kernel / process.c
blob75ee4ee7af090889e964c3216f0ade25b670fded
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
6 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9 #include <linux/errno.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/mm.h>
13 #include <linux/stddef.h>
14 #include <linux/unistd.h>
15 #include <linux/ptrace.h>
16 #include <linux/slab.h>
17 #include <linux/mman.h>
18 #include <linux/personality.h>
19 #include <linux/sys.h>
20 #include <linux/user.h>
21 #include <linux/a.out.h>
22 #include <linux/init.h>
23 #include <linux/completion.h>
25 #include <asm/bootinfo.h>
26 #include <asm/pgtable.h>
27 #include <asm/system.h>
28 #include <asm/mipsregs.h>
29 #include <asm/processor.h>
30 #include <asm/stackframe.h>
31 #include <asm/uaccess.h>
32 #include <asm/io.h>
33 #include <asm/elf.h>
34 #include <asm/cpu.h>
35 #include <asm/fpu.h>
36 #include <asm/inst.h>
/*
 * We use this if we don't have any better idle routine..
 * (This is to kill: kernel/platform.c.)
 */
void default_idle(void)
{
	/* Nothing to do: cpu_idle() just spins when this is the wait routine. */
}
/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
51 ATTRIB_NORET void cpu_idle(void)
53 /* endless idle loop with no priority at all */
54 while (1) {
55 while (!need_resched())
56 if (cpu_wait)
57 (*cpu_wait)();
58 schedule();
62 asmlinkage void ret_from_fork(void);
64 void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
66 unsigned long status;
68 /* New thread loses kernel privileges. */
69 status = regs->cp0_status & ~(ST0_CU0|ST0_FR|ST0_KSU);
70 status |= KSU_USER;
71 status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
72 regs->cp0_status = status;
73 current->used_math = 0;
74 loose_fpu();
75 regs->cp0_epc = pc;
76 regs->regs[29] = sp;
77 current_thread_info()->addr_limit = USER_DS;
void exit_thread(void)
{
	/* Nothing architecture-specific to release on thread exit. */
}
void flush_thread(void)
{
	/* Nothing architecture-specific to flush here. */
}
88 int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
89 unsigned long unused, struct task_struct *p,
90 struct pt_regs *regs)
92 struct thread_info *ti = p->thread_info;
93 struct pt_regs *childregs;
94 long childksp;
96 childksp = (unsigned long)ti + KERNEL_STACK_SIZE - 32;
98 if (is_fpu_owner()) {
99 save_fp(p);
102 /* set up new TSS. */
103 childregs = (struct pt_regs *) childksp - 1;
104 *childregs = *regs;
105 childregs->regs[7] = 0; /* Clear error flag */
106 childregs->regs[2] = 0; /* Child gets zero as return value */
107 regs->regs[2] = p->pid;
109 if (childregs->cp0_status & ST0_CU0) {
110 childregs->regs[28] = (unsigned long) ti;
111 childregs->regs[29] = childksp;
112 ti->addr_limit = KERNEL_DS;
113 } else {
114 childregs->regs[29] = usp;
115 ti->addr_limit = USER_DS;
117 p->thread.reg29 = (unsigned long) childregs;
118 p->thread.reg31 = (unsigned long) ret_from_fork;
121 * New tasks lose permission to use the fpu. This accelerates context
122 * switching for most programs since they don't use the fpu.
124 p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1|ST0_KSU);
125 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
126 clear_tsk_thread_flag(p, TIF_USEDFPU);
127 p->set_child_tid = p->clear_child_tid = NULL;
129 return 0;
132 /* Fill in the fpu structure for a core dump.. */
133 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
135 memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
136 return 1;
/*
 * Create a kernel thread
 */
142 int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
144 int retval;
146 __asm__ __volatile__(
147 " move $6, $sp \n"
148 " move $4, %5 \n"
149 " li $2, %1 \n"
150 " syscall \n"
151 " beq $6, $sp, 1f \n"
152 " move $4, %3 \n"
153 " jalr %4 \n"
154 " move $4, $2 \n"
155 " li $2, %2 \n"
156 " syscall \n"
157 "1: move %0, $2"
158 : "=r" (retval)
159 : "i" (__NR_clone), "i" (__NR_exit), "r" (arg), "r" (fn),
160 "r" (flags | CLONE_VM | CLONE_UNTRACED)
162 * The called subroutine might have destroyed any of the
163 * at, result, argument or temporary registers ...
165 : "$2", "$3", "$4", "$5", "$6", "$7", "$8",
166 "$9","$10","$11","$12","$13","$14","$15","$24","$25","$31");
168 return retval;
/*
 * Offsets (in longs) into a function's stack frame, discovered by
 * scanning the function prologue in get_frame_info().
 */
struct mips_frame_info {
	int frame_offset;	/* where the frame register ($s8) is saved */
	int pc_offset;		/* where the return address ($ra) is saved */
};
175 static struct mips_frame_info schedule_frame;
176 static struct mips_frame_info schedule_timeout_frame;
177 static struct mips_frame_info sleep_on_frame;
178 static struct mips_frame_info sleep_on_timeout_frame;
179 static struct mips_frame_info wait_for_completion_frame;
180 static int mips_frame_info_initialized;
181 static int __init get_frame_info(struct mips_frame_info *info, void *func)
183 int i;
184 union mips_instruction *ip = (union mips_instruction *)func;
185 info->pc_offset = -1;
186 info->frame_offset = -1;
187 for (i = 0; i < 128; i++, ip++) {
188 /* if jal, jalr, jr, stop. */
189 if (ip->j_format.opcode == jal_op ||
190 (ip->r_format.opcode == spec_op &&
191 (ip->r_format.func == jalr_op ||
192 ip->r_format.func == jr_op)))
193 break;
194 if (ip->i_format.opcode == sd_op &&
195 ip->i_format.rs == 29) {
196 /* sd $ra, offset($sp) */
197 if (ip->i_format.rt == 31) {
198 if (info->pc_offset != -1)
199 break;
200 info->pc_offset =
201 ip->i_format.simmediate / sizeof(long);
203 /* sd $s8, offset($sp) */
204 if (ip->i_format.rt == 30) {
205 if (info->frame_offset != -1)
206 break;
207 info->frame_offset =
208 ip->i_format.simmediate / sizeof(long);
212 if (info->pc_offset == -1 || info->frame_offset == -1) {
213 printk("Can't analyze prologue code at %p\n", func);
214 info->pc_offset = -1;
215 info->frame_offset = -1;
216 return -1;
219 return 0;
221 void __init frame_info_init(void)
223 mips_frame_info_initialized =
224 !get_frame_info(&schedule_frame, schedule) &&
225 !get_frame_info(&schedule_timeout_frame, schedule_timeout) &&
226 !get_frame_info(&sleep_on_frame, sleep_on) &&
227 !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) &&
228 !get_frame_info(&wait_for_completion_frame, wait_for_completion);
/*
 * Return saved PC of a blocked thread.
 */
234 unsigned long thread_saved_pc(struct thread_struct *t)
236 extern void ret_from_fork(void);
238 /* New born processes are a special case */
239 if (t->reg31 == (unsigned long) ret_from_fork)
240 return t->reg31;
242 if (schedule_frame.pc_offset < 0)
243 return 0;
244 return ((unsigned long *)t->reg29)[schedule_frame.pc_offset];
/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);

/* Address range [first_sched, last_sched) covering the scheduler functions. */
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)
255 /* get_wchan - a maintenance nightmare ... */
256 unsigned long get_wchan(struct task_struct *p)
258 unsigned long frame, pc;
260 if (!p || p == current || p->state == TASK_RUNNING)
261 return 0;
263 if (!mips_frame_info_initialized)
264 return 0;
265 pc = thread_saved_pc(&p->thread);
266 if (pc < first_sched || pc >= last_sched)
267 goto out;
269 if (pc >= (unsigned long) sleep_on_timeout)
270 goto schedule_timeout_caller;
271 if (pc >= (unsigned long) sleep_on)
272 goto schedule_caller;
273 if (pc >= (unsigned long) interruptible_sleep_on_timeout)
274 goto schedule_timeout_caller;
275 if (pc >= (unsigned long)interruptible_sleep_on)
276 goto schedule_caller;
277 if (pc >= (unsigned long)wait_for_completion)
278 goto schedule_caller;
279 goto schedule_timeout_caller;
281 schedule_caller:
282 frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
283 if (pc >= (unsigned long) sleep_on)
284 pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset];
285 else
286 pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset];
287 goto out;
289 schedule_timeout_caller:
290 /* Must be schedule_timeout ... */
291 frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
293 /* The schedule_timeout frame ... */
294 pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset];
296 if (pc >= first_sched && pc < last_sched) {
297 /* schedule_timeout called by [interruptible_]sleep_on_timeout */
298 frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset];
299 pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset];
302 out:
303 if (current->thread.mflags & MF_32BIT_REGS) /* Kludge for 32-bit ps */
304 pc &= 0xffffffff;
306 return pc;