arch/x86/kernel/process_64.c
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/dmi.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/ds.h>
asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
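
/*
 * A minimal sketch of how a module might consume these hooks; the
 * callback name and the pr_info() call below are illustrative, not
 * part of this file:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		if (action == IDLE_START)
 *			pr_info("cpu entering idle\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */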
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* Tell the scheduler this CPU polls need_resched in its idle loop */
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;
	const char *board;

	printk("\n");
	print_modules();
	board = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!board)
		board = "";
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version, board);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
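
/*
 * Note: the descriptor built above is a flat 32-bit segment; a limit of
 * 0xfffff with limit_in_pages set covers the full 4 GiB, so only the
 * base varies per thread. do_arch_prctl() below uses it for bases that
 * fit in 32 bits, where reloading a GDT selector is cheaper than writing
 * MSR_FS_BASE/MSR_KERNEL_GS_BASE.
 */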
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}

	clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
	p->thread.ds_ctx = NULL;

	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
	p->thread.debugctlmsr = 0;

	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = 0x200;	/* X86_EFLAGS_IF: start with interrupts enabled */
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
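
/*
 * start_thread() is what the binary loaders (e.g. load_elf_binary())
 * call once the new image is mapped: the segment registers are reset to
 * the flat user selectors and the saved pt_regs are pointed at the new
 * entry point and stack, so the return to user space lands in the fresh
 * program.
 */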
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;

	/*
	 * If the task has used the FPU in the last 5 timeslices, just do
	 * a full restore of the math state immediately to avoid the trap;
	 * the chances of needing FPU soon are obviously high now
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}
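
/*
 * User space reaches this via the execve(2) system call; a minimal
 * illustrative caller:
 *
 *	char *argv[] = { "/bin/true", NULL };
 *	char *envp[] = { NULL };
 *	execve("/bin/true", argv, envp);
 */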
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64-bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32-bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
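
/*
 * If newsp is 0 the child keeps running on (a copy-on-write copy of)
 * the parent's stack, which is the fork-like case. An illustrative raw
 * user-space call, assuming the x86-64 argument order above:
 *
 *	syscall(SYS_clone, CLONE_VM | SIGCHLD, child_stack_top, NULL, NULL);
 */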
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
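
/*
 * get_wchan() walks the sleeping task's frame-pointer chain: each frame
 * holds the saved %rbp at fp and the return address at fp+8, and the
 * first return address outside the scheduler is where the task blocked.
 * This is what /proc/<pid>/wchan reports, e.g.:
 *
 *	$ cat /proc/1/wchan
 */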
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/*
		 * handle small bases via the GDT because that's faster to
		 * switch.
		 */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/*
		 * Not strictly needed for fs, but do it for symmetry
		 * with gs
		 */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/*
		 * handle small bases via the GDT because that's faster to
		 * switch.
		 */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/*
				 * set the selector to 0 to not confuse
				 * __switch_to
				 */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
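
/*
 * From user space this is the arch_prctl(2) system call; an illustrative
 * reader of the current thread's %fs base:
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);
 */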