/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/dmi.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/ds.h>
asmlinkage extern void ret_from_fork(void);
DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
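
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * a driver that wants to know when a CPU enters or leaves the idle loop
 * registers a notifier_block with the chain above.  The names
 * my_idle_notify and my_idle_nb are hypothetical.
 */
#if 0
static int my_idle_notify(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	switch (action) {
	case IDLE_START:	/* this CPU is about to go idle */
		break;
	case IDLE_END:		/* this CPU is leaving idle */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call	= my_idle_notify,
};

/* in module init:  idle_notifier_register(&my_idle_nb); */
#endif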
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
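
/*
 * Illustrative sketch (not compiled here) of the contract a pm_idle()
 * implementation follows inside the loop above: it is entered with
 * interrupts disabled and must re-enable them, either atomically while
 * halting ("sti; hlt") or explicitly before returning.  The default
 * x86 idle routine is, at its core:
 */
#if 0
static void default_idle_sketch(void)
{
	/* entered with irqs off; safe_halt() does sti;hlt in one go */
	safe_halt();
	/* an interrupt woke us; irqs are enabled again on return */
}
#endif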
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;
	const char *board;
	board = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!board)
		board = "";
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version, board);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
		regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
		regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
		regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
		regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
		regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
		regs->r13, regs->r14, regs->r15);
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
		fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
		es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
		cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr	= addr,
		.limit		= 0xfffff,
		.seg_32bit	= 1,
		.limit_in_pages	= 1,
		.useable	= 1,
	};
	struct desc_struct *desc = t->thread.tls_array;

	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
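
/*
 * Usage sketch (illustrative only, not compiled here): the two helpers
 * above encode a 32-bit base into one of the task's GDT TLS slots and
 * read it back.  FS_TLS is the slot reserved for small %fs bases;
 * tls_roundtrip is a hypothetical caller.
 */
#if 0
static void tls_roundtrip(struct task_struct *task)
{
	u32 base = 0x10000;

	set_32bit_tls(task, FS_TLS, base);		/* fill the descriptor */
	WARN_ON(read_32bit_tls(task, FS_TLS) != base);	/* decode the base */
}
#endif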
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;
	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs + 1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}
	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
	p->thread.ds_ctx = NULL;

	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
	p->thread.debugctlmsr = 0;

	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = 0x200;	/* IF set: user starts with interrupts enabled */
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
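
/*
 * Usage sketch (illustrative only, not compiled here): a binary-format
 * loader calls start_thread() once the new image is mapped, pointing the
 * user register frame at the entry point and initial stack.  The helper
 * name elf_hand_off is hypothetical; fs/binfmt_elf.c does essentially:
 */
#if 0
static void elf_hand_off(struct pt_regs *regs, unsigned long elf_entry,
			 unsigned long stack_top)
{
	/* loads user CS/SS, resets segments and frees old FPU state */
	start_thread(regs, elf_entry, stack_top);
}
#endif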
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;
	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->xstate);
	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);
	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);
	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);
	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload.  Also
	 * reload when it has changed.  When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;
	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;
	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		(unsigned long)task_stack_page(next_p) +
		THREAD_SIZE - KERNEL_STACK_OFFSET);
	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}
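
/*
 * Illustrative sketch (not compiled here) of the eager-vs-lazy FPU policy
 * used in __switch_to() above: fpu_counter is bumped every timeslice in
 * which the task actually touches the FPU, so a task that has used it for
 * more than five consecutive slices gets its math state restored eagerly
 * at switch time instead of paying a device-not-available trap on first use.
 */
#if 0
static bool should_preload_fpu(struct task_struct *next_p)
{
	return tsk_used_math(next_p) && next_p->fpu_counter > 5;
}
#endif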
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack + THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack + THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp + 8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
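
/*
 * Illustrative sketch (not compiled here) of the frame layout get_wchan()
 * walks.  With frame pointers, each frame starts with the caller's saved
 * %rbp, and the return address sits just above it:
 *
 *	[fp + 8]  return address into the caller
 *	[fp + 0]  caller's saved %rbp (next link in the chain)
 *
 * A generic bounds-checked walker over such a chain; "interesting" is a
 * hypothetical predicate standing in for !in_sched_functions():
 */
#if 0
static unsigned long walk_frames(unsigned long fp, unsigned long stack_lo,
				 unsigned long stack_hi)
{
	int depth = 0;

	while (fp >= stack_lo && fp < stack_hi && depth++ < 16) {
		unsigned long ret_ip = *(unsigned long *)(fp + 8);

		if (interesting(ret_ip))
			return ret_ip;
		fp = *(unsigned long *)fp;	/* follow the saved %rbp */
	}
	return 0;
}
#endif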
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;

		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
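
/*
 * Usage sketch (illustrative userspace code, not compiled here): the
 * ARCH_SET_FS/ARCH_GET_FS pair handled above is how a threading library
 * points %fs at its TLS block on x86-64.  Reading the base back via the
 * raw syscall:
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_GET_FS and friends */

int main(void)
{
	unsigned long base;

	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &base) == 0)
		printf("fs base: %#lx\n", base);
	return 0;
}
#endif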