- pre3:
[davej-history.git] / arch / arm / kernel / process.c
blob3456df4fd4ac049fa91f15ffc42d0491b3c9af2f
/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995 Linus Torvalds
 */
8 #include <stdarg.h>
10 #include <linux/config.h>
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/mm.h>
14 #include <linux/stddef.h>
15 #include <linux/unistd.h>
16 #include <linux/ptrace.h>
17 #include <linux/malloc.h>
18 #include <linux/user.h>
19 #include <linux/delay.h>
20 #include <linux/reboot.h>
21 #include <linux/init.h>
23 #include <asm/system.h>
24 #include <asm/io.h>
25 #include <asm/leds.h>
26 #include <asm/uaccess.h>
/*
 * Values for cpu_do_idle()
 *
 * Encode the requested "level of idleness" (see the comment above
 * cpu_idle below): plain wait vs. clock slowing, slow vs. fast.
 * NOTE(review): nothing visible in this file passes these values;
 * presumably consumed by the processor-specific idle code - verify.
 */
#define IDLE_WAIT_SLOW	0
#define IDLE_WAIT_FAST	1
#define IDLE_CLOCK_SLOW	2
#define IDLE_CLOCK_FAST	3
/* Human-readable names for the ARM processor modes (defined elsewhere);
 * indexed by processor_mode(regs) in show_regs() below. */
extern const char *processor_modes[];

/* Ask the mm code to insert 1:1 mappings so a soft reboot can run
 * from physical addresses (used by machine_restart() below). */
extern void setup_mm_for_reboot(char mode);

/* Declaration bound to the asm-level symbol of the syscall return path. */
asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");

/*
 * When non-zero, halting the CPU in the idle loop is disabled.
 * Adjusted by disable_hlt()/enable_hlt() and the "hlt"/"nohlt"
 * boot options below.
 * NOTE(review): not read directly in this file - presumably consulted
 * by arch_idle() from <asm/arch/system.h>; confirm.
 */
static volatile int hlt_counter;

#include <asm/arch/system.h>	/* machine-specific arch_idle()/arch_reset() */
/*
 * Forbid the idle loop from halting the CPU.  Nests: each call must
 * be balanced by an enable_hlt().
 */
void disable_hlt(void)
{
	hlt_counter++;
}
/*
 * Re-allow the idle loop to halt the CPU; undoes one disable_hlt().
 */
void enable_hlt(void)
{
	hlt_counter--;
}
/*
 * "nohlt" boot option: start with CPU halting disabled.
 * Returns 1 to tell the option parser the argument was consumed.
 */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
/*
 * "hlt" boot option: (re-)enable CPU halting in the idle loop.
 * Returns 1 to tell the option parser the argument was consumed.
 */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
/* Register the "nohlt" and "hlt" kernel command line options. */
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
/*
 * The following aren't currently used.
 *
 * NOTE(review): "unused" here appears to mean nothing assigns them yet;
 * pm_idle IS read by cpu_idle() and pm_power_off by machine_power_off()
 * below, so a power-management driver may install hooks at runtime.
 */
void (*pm_idle)(void);
void (*pm_power_off)(void);
/*
 * The idle thread. We try to conserve power, while trying to keep
 * overall latency low. The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	init_idle();
	current->nice = 20;	/* lowest scheduling priority */
	current->counter = -100;	/* presumably keeps the idle task from
					 * ever winning a timeslice - confirm
					 * against the scheduler */

	while (1) {
		/* Re-sample pm_idle each pass: a PM driver may install
		 * (or remove) a hook at any time. */
		void (*idle)(void) = pm_idle;
		if (!idle)
			idle = arch_idle;	/* machine default from <asm/arch/system.h> */
		/* Spin in the low-power idle call until someone needs the CPU. */
		while (!current->need_resched)
			idle();
		schedule();
#ifndef CONFIG_NO_PGT_CACHE
		/* Trim the page-table cache while we have nothing better to do. */
		check_pgt_cache();
#endif
	}
}
/* Reboot mode handed to arch_reset(); default 'h' (presumably "hard" -
 * confirm against the machine's arch_reset()).  Overridden by "reboot=". */
static char reboot_mode = 'h';

/*
 * "reboot=" boot option: the first character selects the reboot mode.
 * Returns 1 to tell the option parser the argument was consumed.
 */
int __init reboot_setup(char *str)
{
	reboot_mode = str[0];
	return 1;
}

__setup("reboot=", reboot_setup);
/*
 * Halt the machine: signal the halted state on the LEDs and simply
 * return - nothing more is done here.
 */
void machine_halt(void)
{
	leds_event(led_halted);
}
/*
 * Power the machine off: show the halted state on the LEDs, then
 * invoke the power-management hook if a driver has installed one.
 */
void machine_power_off(void)
{
	leds_event(led_halted);
	if (pm_power_off)
		pm_power_off();
}
/*
 * Restart the machine.  The sequence below is order-critical:
 * caches must be clean/off before the reboot mappings are built,
 * and those mappings must exist before arch_reset() soft boots.
 * Does not return on success; spins forever on failure.
 */
void machine_restart(char * __unused)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode);

	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(reboot_mode);

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1);
}
/*
 * Dump the register state in *regs to the console: pc/lr/sp/ip/fp,
 * r10..r0, the CPSR condition flags, interrupt state and mode, plus
 * (on 32-bit CPUs) the CP15 control, translation base and domain
 * access registers read via inline assembly.
 */
void show_regs(struct pt_regs * regs)
{
	unsigned long flags;

	flags = condition_codes(regs);

	printk("pc : [<%08lx>] lr : [<%08lx>]\n"
	       "sp : %08lx ip : %08lx fp : %08lx\n",
		instruction_pointer(regs),
		regs->ARM_lr, regs->ARM_sp,
		regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);
	/* Upper case letter = flag set, lower case = clear. */
	printk("Flags: %c%c%c%c",
		flags & CC_N_BIT ? 'N' : 'n',
		flags & CC_Z_BIT ? 'Z' : 'z',
		flags & CC_C_BIT ? 'C' : 'c',
		flags & CC_V_BIT ? 'V' : 'v');
	printk(" IRQs %s FIQs %s Mode %s Segment %s\n",
		interrupts_enabled(regs) ? "on" : "off",
		fast_interrupts_enabled(regs) ? "on" : "off",
		processor_modes[processor_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
#if defined(CONFIG_CPU_32)
	{
		int ctrl, transbase, dac;

		/* Read CP15 c1 (control), c2 (translation table base)
		 * and c3 (domain access control). */
		__asm__ (
		"	mrc p15, 0, %0, c1, c0\n"
		"	mrc p15, 0, %1, c2, c0\n"
		"	mrc p15, 0, %2, c3, c0\n"
		: "=r" (ctrl), "=r" (transbase), "=r" (dac));
		printk("Control: %04X Table: %08X DAC: %08X\n",
			ctrl, transbase, dac);
	}
#endif
}
/*
 * Dump the FP register dump in *regs to the console: the eight FP
 * registers (three words each, two registers per output line) with a
 * type tag ('f'loat/'d'ouble/'e'xtended, '?' for unknown or when the
 * state is uninitialised), followed by FPSR and FPCR.
 */
void show_fpregs(struct user_fp *regs)
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned long *p;
		char type;

		p = (unsigned long *)(regs->fpregs + i);

		switch (regs->ftype[i]) {
			case 1: type = 'f'; break;
			case 2: type = 'd'; break;
			case 3: type = 'e'; break;
			default: type = '?'; break;
		}

		/* If the FP state was never initialised, the type tags
		 * are meaningless. */
		if (regs->init_flag)
			type = '?';

		printk(" f%d(%c): %08lx %08lx %08lx%c",
			i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
	}

	printk("FPSR: %08lx FPCR: %08lx\n",
		(unsigned long)regs->fpsr,
		(unsigned long)regs->fpcr);
}
/*
 * Task structure and kernel stack allocation.
 */
static struct task_struct *task_struct_head;	/* small free-list cache of task structs */
static unsigned int nr_task_struct;		/* current length of that cache */

#ifdef CONFIG_CPU_32
#define EXTRA_TASK_STRUCT	4	/* cache up to 4 freed task structs */
#else
#define EXTRA_TASK_STRUCT	0	/* caching disabled otherwise */
#endif
/*
 * Allocate a task structure (with its kernel stack), preferring the
 * small local free-list cache over the low-level allocator.  Returns
 * NULL on failure.  The cached entries reuse ->next_task as the
 * free-list link.  EXTRA_TASK_STRUCT is a compile-time constant, so
 * the dead branch is optimised away.
 */
struct task_struct *alloc_task_struct(void)
{
	struct task_struct *tsk;

	if (EXTRA_TASK_STRUCT)
		tsk = task_struct_head;
	else
		tsk = NULL;

	if (tsk) {
		/* Pop the head of the free-list cache. */
		task_struct_head = tsk->next_task;
		nr_task_struct -= 1;
	} else
		tsk = ll_alloc_task_struct();

#ifdef CONFIG_SYSRQ
	/*
	 * The stack must be cleared if you want SYSRQ-T to
	 * give sensible stack usage information
	 */
	if (tsk) {
		char *p = (char *)tsk;
		/* NOTE(review): clears the upper KERNEL_STACK_SIZE bytes of
		 * the allocation - assumes the stack occupies the second half
		 * of the task area; confirm against ll_alloc_task_struct(). */
		memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
	}
#endif
	return tsk;
}
264 void __free_task_struct(struct task_struct *p)
266 if (EXTRA_TASK_STRUCT && nr_task_struct < EXTRA_TASK_STRUCT) {
267 p->next_task = task_struct_head;
268 task_struct_head = p;
269 nr_task_struct += 1;
270 } else
271 ll_free_task_struct(p);
/*
 * Free current thread data structures etc..
 * Nothing to do on this architecture.
 */
void exit_thread(void)
{
}
281 void flush_thread(void)
283 memset(&current->thread.debug, 0, sizeof(struct debug_info));
284 memset(&current->thread.fpstate, 0, sizeof(union fp_state));
285 current->used_math = 0;
286 current->flags &= ~PF_USEDFPU;
289 void release_thread(struct task_struct *dead_task)
/*
 * Set up the kernel context of a newly forked task: copy the parent's
 * user registers to the top of the child's task area, make the child
 * see a zero return value, and build the initial context-save frame
 * the scheduler will switch into.  Always returns 0.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct context_save_struct * save;

	atomic_set(&p->thread.refcount, 1);

	/* The child's pt_regs sit at the very top of its 8K task area
	 * (task_struct + kernel stack).  NOTE(review): 8192 is presumably
	 * 2 * KERNEL_STACK_SIZE - confirm against the allocator. */
	childregs = ((struct pt_regs *)((unsigned long)p + 8192)) - 1;
	*childregs = *regs;
	childregs->ARM_r0 = 0;		/* child sees 0 from fork/clone */
	childregs->ARM_sp = esp;	/* child's user stack pointer */

	/* Context-save frame goes immediately below the pt_regs. */
	save = ((struct context_save_struct *)(childregs)) - 1;
	init_thread_css(save);
	p->thread.save = save;

	return 0;
}
314 * fill in the fpe structure for a core dump...
316 int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
318 if (current->used_math)
319 memcpy(fp, &current->thread.fpstate.soft, sizeof (*fp));
321 return current->used_math;
325 * fill in the user structure for a core dump..
327 void dump_thread(struct pt_regs * regs, struct user * dump)
329 struct task_struct *tsk = current;
331 dump->magic = CMAGIC;
332 dump->start_code = tsk->mm->start_code;
333 dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
335 dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
336 dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
337 dump->u_ssize = 0;
339 dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
340 dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
341 dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn;
342 dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn;
343 dump->u_debugreg[4] = tsk->thread.debug.nsaved;
345 if (dump->start_stack < 0x04000000)
346 dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
348 dump->regs = *regs;
349 dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be free'd until both the parent and the child have exited.
 *
 * The clone syscall is issued from inline assembly (rather than via the
 * C library) so the child, which has no user stack, never returns
 * through the C calling sequence: it falls straight through to call
 * fn(arg) and then sys_exit() with its result.
 * NOTE(review): the asm body relies on the old GCC multi-line string
 * literal extension.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	extern long sys_exit(int) __attribute__((noreturn));
	pid_t __ret;

	__asm__ __volatile__(
	"mov	r0, %1		@ kernel_thread sys_clone
	mov	r1, #0
	"__syscall(clone)"
	teq	r0, #0		@ if we are the child
	moveq	fp, #0		@ ensure that fp is zero
	mov	%0, r0"
        : "=r" (__ret)
        : "Ir" (flags | CLONE_VM) : "r0", "r1");
	/* Child path: run the thread function, then exit with its result. */
	if (__ret == 0)
		sys_exit((fn)(arg));
	return __ret;
}
/*
 * These bracket the sleeping functions..
 * get_wchan() below uses [first_sched, last_sched] to skip over
 * scheduler-internal return addresses while walking the stack.
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)
/*
 * Find the address the given (sleeping) task is waiting in: walk its
 * saved frame-pointer chain and return the first saved return address
 * that lies outside the scheduler functions.  Returns 0 for the
 * current/running task or if no valid address is found within 16
 * frames.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/* NOTE(review): assumes a 4K kernel stack starting one page into
	 * the 8K task area - confirm against alloc_task_struct(). */
	stack_page = 4096 + (unsigned long)p;
	fp = get_css_fp(&p->thread);
	do {
		/* Sanity-check that fp still points into the stack page. */
		if (fp < stack_page || fp > 4092+stack_page)
			return 0;
		/* APCS frame: saved lr at fp-4 (i.e. fp[-1])... */
		lr = pc_pointer (((unsigned long *)fp)[-1]);
		if (lr < first_sched || lr > last_sched)
			return lr;
		/* ...and the caller's fp at fp-12. */
		fp = *(unsigned long *) (fp - 12);
	} while (count ++ < 16);	/* bound the walk: frame chain may be corrupt */
	return 0;
}