/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
asmlinkage extern void ret_from_fork(void);
DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
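
/*
 * Usage sketch (hypothetical callback and names, not taken from this
 * file): a subsystem that wants to track idle transitions hangs a
 * callback on the chain above, e.g.:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long val, void *unused)
 *	{
 *		return NOTIFY_OK;	(act on IDLE_START / IDLE_END)
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */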
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
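
/*
 * Note on the ordering above: interrupts are disabled before enter_idle()
 * runs the IDLE_START chain, and the low-level idle routine (pm_idle) is
 * expected to re-enable them itself (e.g. via sti;hlt in default_idle),
 * so a wakeup interrupt cannot slip in between the notification and the
 * actual halt.
 */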
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;
	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
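
/*
 * These two helpers back the small-address (< 4GB) case of ARCH_SET_FS/
 * ARCH_SET_GS in do_arch_prctl() below: the base is stashed in a GDT
 * entry (FS_TLS/GS_TLS), so switching it costs only a segment register
 * load instead of a wrmsr.
 */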
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
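
/*
 * Layout note: childregs is carved out of the top of the child's stack
 * pages, so the new task's kernel stack begins just below its saved
 * user register frame. TIF_FORK, set above, makes the first switch to
 * the child land in ret_from_fork (declared at the top of this file).
 */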
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif
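
/*
 * Both start_thread() variants are called by the binfmt loaders at exec
 * time, once the new mm is in place: they reset the data segments and
 * the user ip/sp so that the first return to user mode enters the fresh
 * image.
 */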
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported too.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;

	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		     (unsigned long)task_stack_page(next_p) +
		     THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}
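
/*
 * The prev_p returned above travels back in %rax across the stack
 * switch done by the switch_to() macro, which hands it to the scheduler
 * as the "last" task.
 */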
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
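
/*
 * The walk above assumes frame-pointer call frames: fp points at the
 * saved caller %rbp and fp+8 holds the return address, so it is only
 * reliable when the kernel is built with CONFIG_FRAME_POINTER.
 */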
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
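
/*
 * Userspace sketch (using the generic syscall(2) wrapper; tls_block is a
 * hypothetical user buffer): a 64-bit process manipulates its %fs base
 * like this:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *
 * For the ARCH_GET_* codes, "addr" is a user pointer that the kernel
 * stores the base through (see the put_user() calls above).
 */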
unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}