/*
 *  linux/arch/m68knommu/kernel/process.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 *
 *  68060 fixes by Jesper Skov
 *
 *  Copyright (C) 2000-2002, David McCullough <davidm@snapgear.com>
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

asmlinkage void ret_from_fork(void);

/*
 * The following aren't currently used.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

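/*
 * Note: the 68k "stop #0x2000" used in the idle loop below loads 0x2000
 * into the status register (supervisor mode, interrupt mask 0) and halts
 * the CPU until the next interrupt, so the processor sleeps while idle.
 */
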
/*
 * The idle loop on an m68knommu..
 */
static void default_idle(void)
{
        local_irq_disable();
        while (!need_resched()) {
                /* This stop will re-enable interrupts */
                __asm__("stop #0x2000" : : : "cc");
                local_irq_disable();
        }
        local_irq_enable();
}

void (*idle)(void) = default_idle;

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                idle();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

void machine_restart(char * __unused)
{
        if (mach_reset)
                mach_reset();
        for (;;);
}

void machine_halt(void)
{
        if (mach_halt)
                mach_halt();
        for (;;);
}

void machine_power_off(void)
{
        if (mach_power_off)
                mach_power_off();
        for (;;);
}

void show_regs(struct pt_regs * regs)
{
        printk(KERN_NOTICE "\n");
        printk(KERN_NOTICE "Format %02x  Vector: %04x  PC: %08lx  Status: %04x    %s\n",
               regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
        printk(KERN_NOTICE "ORIG_D0: %08lx  D0: %08lx  A2: %08lx  A1: %08lx\n",
               regs->orig_d0, regs->d0, regs->a2, regs->a1);
        printk(KERN_NOTICE "A0: %08lx  D5: %08lx  D4: %08lx\n",
               regs->a0, regs->d5, regs->d4);
        printk(KERN_NOTICE "D3: %08lx  D2: %08lx  D1: %08lx\n",
               regs->d3, regs->d2, regs->d1);
        if (!(regs->sr & PS_S))
                printk(KERN_NOTICE "USP: %08lx\n", rdusp());
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        int retval;
        long clone_arg = flags | CLONE_VM;
        mm_segment_t fs;

        fs = get_fs();
        set_fs(KERNEL_DS);

        __asm__ __volatile__ (
                        "movel  %%sp, %%d2\n\t"
                        "movel  %5, %%d1\n\t"
                        "movel  %1, %%d0\n\t"
                        "trap   #0\n\t"
                        "cmpl   %%sp, %%d2\n\t"
                        "jeq    1f\n\t"
                        "movel  %3, %%sp@-\n\t"
                        "jsr    %4@\n\t"
                        "movel  %2, %%d0\n\t"
                        "trap   #0\n"
                        "1:\n\t"
                        "movel  %%d0, %0\n"
                : "=d" (retval)
                : "i" (__NR_clone),
                  "i" (__NR_exit),
                  "a" (arg),
                  "a" (fn),
                  "a" (clone_arg)
                : "cc", "%d0", "%d1", "%d2");

        set_fs(fs);
        return retval;
}

void flush_thread(void)
{
#ifdef CONFIG_FPU
        unsigned long zero = 0;
#endif
        set_fs(USER_DS);
        current->thread.fs = __USER_DS;
#ifdef CONFIG_FPU
        if (!FPU_IS_EMU)
                asm volatile (".chip 68k/68881\n\t"
                              "frestore %0@\n\t"
                              ".chip 68k" : : "a" (&zero));
#endif
}

172 * "m68k_fork()".. By the time we get here, the
173 * non-volatile registers have also been saved on the
174 * stack. We do some ugly pointer stuff here.. (see
178 asmlinkage
int m68k_fork(struct pt_regs
*regs
)
180 /* fork almost works, enough to trick you into looking elsewhere :-( */
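/*
 * Without an MMU there is no copy-on-write, so a full fork() cannot be
 * supported; only vfork() and clone() with CLONE_VM are available. The
 * child therefore shares the parent's address space and runs on the
 * parent's user stack until it execs or exits.
 */
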
asmlinkage int m68k_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}

asmlinkage int m68k_clone(struct pt_regs *regs)
{
        unsigned long clone_flags;
        unsigned long newsp;

        /* syscall2 puts clone_flags in d1 and usp in d2 */
        clone_flags = regs->d1;
        newsp = regs->d2;
        if (!newsp)
                newsp = rdusp();
        return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

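/*
 * copy_thread() below lays out the child's kernel stack: a pt_regs
 * frame is placed at the very top of the stack page and a switch_stack
 * frame is copied in just beneath it, with retpc pointing at
 * ret_from_fork so the first switch into the child unwinds through the
 * normal syscall return path.
 */
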
int copy_thread(unsigned long clone_flags,
                unsigned long usp, unsigned long topstk,
                struct task_struct * p, struct pt_regs * regs)
{
        struct pt_regs * childregs;
        struct switch_stack * childstack, *stack;
        unsigned long *retp;

        childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;

        *childregs = *regs;
        childregs->d0 = 0;

        retp = ((unsigned long *) regs);
        stack = ((struct switch_stack *) retp) - 1;

        childstack = ((struct switch_stack *) childregs) - 1;
        *childstack = *stack;
        childstack->retpc = (unsigned long)ret_from_fork;

        p->thread.usp = usp;
        p->thread.ksp = (unsigned long)childstack;

        if (clone_flags & CLONE_SETTLS)
                task_thread_info(p)->tp_value = regs->d5;

        /*
         * Must save the current SFC/DFC value, NOT the value when
         * the parent was last descheduled - RGH 10-08-96
         */
        p->thread.fs = get_fs().seg;

#ifdef CONFIG_FPU
        if (!FPU_IS_EMU) {
                /* Copy the current fpu state */
                asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");

                if (p->thread.fpstate[0])
                        asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
                                      "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
                                      : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
                                      : "memory");
                /* Restore the state in case the fpu was busy */
                asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
        }
#endif

        return 0;
}

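/*
 * When the FPU is emulated, dump_fpu() below copies the control and
 * data registers straight out of the thread structure and then
 * rearranges each register from the emulator's internal representation
 * into the long double layout user space expects in a core dump. With
 * a real FPU the state is dumped with fsave/fmovem instead.
 */
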
/* Fill in the fpu structure for a core dump.  */
int dump_fpu(struct pt_regs *regs, struct user_m68kfp_struct *fpu)
{
#ifdef CONFIG_FPU
        char fpustate[216];

        if (FPU_IS_EMU) {
                int i;

                memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
                memcpy(fpu->fpregs, current->thread.fp, 96);
                /* Convert internal fpu reg representation
                 * into long double format
                 */
                for (i = 0; i < 24; i += 3)
                        fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
                                         ((fpu->fpregs[i] & 0x0000ffff) << 16);
                return 1;
        }

        /* First dump the fpu context to avoid protocol violation.  */
        asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
        if (!fpustate[0])
                return 0;

        asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
                :: "m" (fpu->fpcntl[0])
                : "memory");
        asm volatile ("fmovemx %/fp0-%/fp7,%0"
                :: "m" (fpu->fpregs[0])
                : "memory");
#endif
        return 1;
}

/*
 *      Generic dumping code. Used for panic and debug.
 */
void dump(struct pt_regs *fp)
{
        unsigned long *sp;
        unsigned char *tp;
        int i;

        printk(KERN_EMERG "\nCURRENT PROCESS:\n\n");
        printk(KERN_EMERG "COMM=%s PID=%d\n", current->comm, current->pid);

        if (current->mm) {
                printk(KERN_EMERG "TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
                        (int) current->mm->start_code,
                        (int) current->mm->end_code,
                        (int) current->mm->start_data,
                        (int) current->mm->end_data,
                        (int) current->mm->end_data,
                        (int) current->mm->brk);
                printk(KERN_EMERG "USER-STACK=%08x KERNEL-STACK=%08x\n\n",
                        (int) current->mm->start_stack,
                        (int)(((unsigned long) current) + THREAD_SIZE));
        }

        printk(KERN_EMERG "PC: %08lx\n", fp->pc);
        printk(KERN_EMERG "SR: %08lx    SP: %08lx\n", (long) fp->sr, (long) fp);
        printk(KERN_EMERG "d0: %08lx    d1: %08lx    d2: %08lx    d3: %08lx\n",
                fp->d0, fp->d1, fp->d2, fp->d3);
        printk(KERN_EMERG "d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
                fp->d4, fp->d5, fp->a0, fp->a1);
        printk(KERN_EMERG "\nUSP: %08x   TRAPFRAME: %08x\n",
                (unsigned int) rdusp(), (unsigned int) fp);

        printk(KERN_EMERG "\nCODE:");
        tp = ((unsigned char *) fp->pc) - 0x20;
        for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
                if ((i % 0x10) == 0)
                        printk(KERN_EMERG "%08x: ", (int) (tp + i));
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");

        printk(KERN_EMERG "KERNEL STACK:");
        tp = ((unsigned char *) fp) - 0x40;
        for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
                if ((i % 0x10) == 0)
                        printk(KERN_EMERG "%08x: ", (int) (tp + i));
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");

        printk(KERN_EMERG "USER STACK:");
        tp = (unsigned char *) (rdusp() - 0x10);
        for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
                if ((i % 0x10) == 0)
                        printk(KERN_EMERG "%08x: ", (int) (tp + i));
                printk("%08x ", (int) *sp++);
        }
        printk(KERN_EMERG "\n");
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *name, char **argv, char **envp)
{
        int error;
        char * filename;
        struct pt_regs *regs = (struct pt_regs *) &name;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);
        putname(filename);
        return error;
}

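/*
 * get_wchan() below walks the sleeping task's %a6 frame-pointer chain,
 * starting from the switch_stack saved at thread.ksp. It gives up once
 * the frame pointer leaves the task's stack or after 16 frames, and
 * returns the first return address that is not inside the scheduler.
 */
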
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, pc;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)p;
        fp = ((struct switch_stack *)p->thread.ksp)->a6;
        do {
                if (fp < stack_page+sizeof(struct thread_info) ||
                    fp >= THREAD_SIZE-8+stack_page)
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (!in_sched_functions(pc))
                        return pc;
                fp = *(unsigned long *) fp;
        } while (count++ < 16);
        return 0;
}

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;

        /* Check whether the thread is blocked in resume() */
        if (in_sched_functions(sw->retpc))
                return ((unsigned long *)sw->a6)[1];
        else
                return sw->retpc;
}