2 * linux/arch/arm/kernel/process.c
4 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
5 * Original Copyright (C) 1995 Linus Torvalds
10 #include <linux/config.h>
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
14 #include <linux/stddef.h>
15 #include <linux/unistd.h>
16 #include <linux/ptrace.h>
17 #include <linux/malloc.h>
18 #include <linux/user.h>
19 #include <linux/delay.h>
20 #include <linux/reboot.h>
21 #include <linux/init.h>
23 #include <asm/system.h>
26 #include <asm/uaccess.h>
/*
 * Values for cpu_do_idle()
 */
#define IDLE_WAIT_SLOW	0
#define IDLE_WAIT_FAST	1
#define IDLE_CLOCK_SLOW	2
#define IDLE_CLOCK_FAST	3
36 extern const char *processor_modes
[];
37 extern void setup_mm_for_reboot(char mode
);
39 asmlinkage
void ret_from_sys_call(void) __asm__("ret_from_sys_call");
41 static volatile int hlt_counter
;
43 #include <asm/arch/system.h>
45 void disable_hlt(void)
55 static int __init
nohlt_setup(char *__unused
)
61 static int __init
hlt_setup(char *__unused
)
67 __setup("nohlt", nohlt_setup
);
68 __setup("hlt", hlt_setup
);
/*
 * The following aren't currently used.
 */
void (*pm_idle)(void);		/* optional platform idle hook */
void (*pm_power_off)(void);	/* optional platform power-off hook */
/*
 * NOTE(review): this region is a garbled fragment of the idle-thread
 * function (cpu_idle); its signature, loop skeleton and several body
 * lines are missing from this extract, so only comments are added here.
 */
77 * The idle thread. We try to conserve power, while trying to keep
78 * overall latency low. The architecture specific idle is passed
79 * a value to indicate the level of "idleness" of the system.
83 /* endless idle loop with no priority at all */
/* Park the idle task at the lowest possible scheduling priority. */
86 current
->counter
= -100;
/* Pick the platform PM idle hook, if one was registered. */
89 void (*idle
)(void) = pm_idle
;
/* Spin in the idle routine until a reschedule is requested. */
92 while (!current
->need_resched
)
/* Presumably followed by check_pgt_cache() — rest of loop missing. */
95 #ifndef CONFIG_NO_PGT_CACHE
/* Reboot mode passed to arch_reset(); presumably 'h' = hard reset —
 * overridden by the "reboot=" boot option (see reboot_setup). */
static char reboot_mode = 'h';
103 int __init
reboot_setup(char *str
)
105 reboot_mode
= str
[0];
109 __setup("reboot=", reboot_setup
);
111 void machine_halt(void)
113 leds_event(led_halted
);
116 void machine_power_off(void)
118 leds_event(led_halted
);
123 void machine_restart(char * __unused
)
126 * Clean and disable cache, and turn off interrupts
131 * Tell the mm system that we are going to reboot -
132 * we may need it to insert some 1:1 mappings so that
135 setup_mm_for_reboot(reboot_mode
);
138 * Now call the architecture specific reboot code.
140 arch_reset(reboot_mode
);
143 * Whoops - the architecture was unable to reboot.
147 printk("Reboot failed -- System halted\n");
151 void show_regs(struct pt_regs
* regs
)
155 flags
= condition_codes(regs
);
157 printk("pc : [<%08lx>] lr : [<%08lx>]\n"
158 "sp : %08lx ip : %08lx fp : %08lx\n",
159 instruction_pointer(regs
),
160 regs
->ARM_lr
, regs
->ARM_sp
,
161 regs
->ARM_ip
, regs
->ARM_fp
);
162 printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
163 regs
->ARM_r10
, regs
->ARM_r9
,
165 printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
166 regs
->ARM_r7
, regs
->ARM_r6
,
167 regs
->ARM_r5
, regs
->ARM_r4
);
168 printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
169 regs
->ARM_r3
, regs
->ARM_r2
,
170 regs
->ARM_r1
, regs
->ARM_r0
);
171 printk("Flags: %c%c%c%c",
172 flags
& CC_N_BIT
? 'N' : 'n',
173 flags
& CC_Z_BIT
? 'Z' : 'z',
174 flags
& CC_C_BIT
? 'C' : 'c',
175 flags
& CC_V_BIT
? 'V' : 'v');
176 printk(" IRQs %s FIQs %s Mode %s Segment %s\n",
177 interrupts_enabled(regs
) ? "on" : "off",
178 fast_interrupts_enabled(regs
) ? "on" : "off",
179 processor_modes
[processor_mode(regs
)],
180 get_fs() == get_ds() ? "kernel" : "user");
181 #if defined(CONFIG_CPU_32)
183 int ctrl
, transbase
, dac
;
185 " mrc p15, 0, %0, c1, c0\n"
186 " mrc p15, 0, %1, c2, c0\n"
187 " mrc p15, 0, %2, c3, c0\n"
188 : "=r" (ctrl
), "=r" (transbase
), "=r" (dac
));
189 printk("Control: %04X Table: %08X DAC: %08X\n",
190 ctrl
, transbase
, dac
);
195 void show_fpregs(struct user_fp
*regs
)
199 for (i
= 0; i
< 8; i
++) {
203 p
= (unsigned long *)(regs
->fpregs
+ i
);
205 switch (regs
->ftype
[i
]) {
206 case 1: type
= 'f'; break;
207 case 2: type
= 'd'; break;
208 case 3: type
= 'e'; break;
209 default: type
= '?'; break;
214 printk(" f%d(%c): %08lx %08lx %08lx%c",
215 i
, type
, p
[0], p
[1], p
[2], i
& 1 ? '\n' : ' ');
219 printk("FPSR: %08lx FPCR: %08lx\n",
220 (unsigned long)regs
->fpsr
,
221 (unsigned long)regs
->fpcr
);
/*
 * Task structure and kernel stack allocation.
 */
static struct task_struct *task_struct_head;	/* free-list of recycled task_structs */
static unsigned int nr_task_struct;		/* number of entries on the free-list */

/* NOTE(review): the original #if guard lines are missing from this
 * extract; CONFIG_CPU_32 is the restored condition — confirm. */
#ifdef CONFIG_CPU_32
#define EXTRA_TASK_STRUCT	4
#else
#define EXTRA_TASK_STRUCT	0
#endif
236 struct task_struct
*alloc_task_struct(void)
238 struct task_struct
*tsk
;
240 if (EXTRA_TASK_STRUCT
)
241 tsk
= task_struct_head
;
246 task_struct_head
= tsk
->next_task
;
249 tsk
= ll_alloc_task_struct();
253 * The stack must be cleared if you want SYSRQ-T to
254 * give sensible stack usage information
257 char *p
= (char *)tsk
;
258 memzero(p
+KERNEL_STACK_SIZE
, KERNEL_STACK_SIZE
);
264 void __free_task_struct(struct task_struct
*p
)
266 if (EXTRA_TASK_STRUCT
&& nr_task_struct
< EXTRA_TASK_STRUCT
) {
267 p
->next_task
= task_struct_head
;
268 task_struct_head
= p
;
271 ll_free_task_struct(p
);
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* NOTE(review): body missing from this extract; the original is empty */
}
281 void flush_thread(void)
283 memset(¤t
->thread
.debug
, 0, sizeof(struct debug_info
));
284 memset(¤t
->thread
.fpstate
, 0, sizeof(union fp_state
));
285 current
->used_math
= 0;
286 current
->flags
&= ~PF_USEDFPU
;
/* Called when a task is reaped; nothing to release on this port. */
void release_thread(struct task_struct *dead_task)
{
	/* NOTE(review): body missing from this extract; the original is empty */
}
293 int copy_thread(int nr
, unsigned long clone_flags
, unsigned long esp
,
294 struct task_struct
* p
, struct pt_regs
* regs
)
296 struct pt_regs
* childregs
;
297 struct context_save_struct
* save
;
299 atomic_set(&p
->thread
.refcount
, 1);
301 childregs
= ((struct pt_regs
*)((unsigned long)p
+ 8192)) - 1;
303 childregs
->ARM_r0
= 0;
304 childregs
->ARM_sp
= esp
;
306 save
= ((struct context_save_struct
*)(childregs
)) - 1;
307 init_thread_css(save
);
308 p
->thread
.save
= save
;
314 * fill in the fpe structure for a core dump...
316 int dump_fpu (struct pt_regs
*regs
, struct user_fp
*fp
)
318 if (current
->used_math
)
319 memcpy(fp
, ¤t
->thread
.fpstate
.soft
, sizeof (*fp
));
321 return current
->used_math
;
325 * fill in the user structure for a core dump..
327 void dump_thread(struct pt_regs
* regs
, struct user
* dump
)
329 struct task_struct
*tsk
= current
;
331 dump
->magic
= CMAGIC
;
332 dump
->start_code
= tsk
->mm
->start_code
;
333 dump
->start_stack
= regs
->ARM_sp
& ~(PAGE_SIZE
- 1);
335 dump
->u_tsize
= (tsk
->mm
->end_code
- tsk
->mm
->start_code
) >> PAGE_SHIFT
;
336 dump
->u_dsize
= (tsk
->mm
->brk
- tsk
->mm
->start_data
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
339 dump
->u_debugreg
[0] = tsk
->thread
.debug
.bp
[0].address
;
340 dump
->u_debugreg
[1] = tsk
->thread
.debug
.bp
[1].address
;
341 dump
->u_debugreg
[2] = tsk
->thread
.debug
.bp
[0].insn
;
342 dump
->u_debugreg
[3] = tsk
->thread
.debug
.bp
[1].insn
;
343 dump
->u_debugreg
[4] = tsk
->thread
.debug
.nsaved
;
345 if (dump
->start_stack
< 0x04000000)
346 dump
->u_ssize
= (0x04000000 - dump
->start_stack
) >> PAGE_SHIFT
;
349 dump
->u_fpvalid
= dump_fpu (regs
, &dump
->u_fp
);
/*
 * NOTE(review): this region is a garbled fragment of kernel_thread();
 * several original lines (the sys_clone syscall sequence, the output
 * operand, and the function epilogue) are missing from this extract,
 * so only comments are added here — do not treat it as compilable.
 */
353 * This is the mechanism for creating a new kernel thread.
355 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
356 * who haven't done an "execve()") should use this: it will work within
357 * a system call from a "real" process, but the process memory space will
358 * not be free'd until both the parent and the child have exited.
360 pid_t
kernel_thread(int (*fn
)(void *), void *arg
, unsigned long flags
)
/* sys_exit is declared noreturn so the child can terminate cleanly. */
362 extern long sys_exit(int) __attribute__((noreturn
));
/* Inline assembly: invokes sys_clone with CLONE_VM; in the child the
 * frame pointer is zeroed so stack walks terminate. */
365 __asm__
__volatile__(
366 "mov r0, %1 @ kernel_thread sys_clone
369 teq r0, #0 @ if we are the child
370 moveq fp, #0 @ ensure that fp is zero
373 : "Ir" (flags
| CLONE_VM
) : "r0", "r1");
/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)
387 unsigned long get_wchan(struct task_struct
*p
)
389 unsigned long fp
, lr
;
390 unsigned long stack_page
;
392 if (!p
|| p
== current
|| p
->state
== TASK_RUNNING
)
395 stack_page
= 4096 + (unsigned long)p
;
396 fp
= get_css_fp(&p
->thread
);
398 if (fp
< stack_page
|| fp
> 4092+stack_page
)
400 lr
= pc_pointer (((unsigned long *)fp
)[-1]);
401 if (lr
< first_sched
|| lr
> last_sched
)
403 fp
= *(unsigned long *) (fp
- 12);
404 } while (count
++ < 16);