[PATCH] non lazy "sleazy" fpu implementation
[linux-2.6/kvm.git] arch/x86_64/kernel/process.c
blob 9e9a70e50c72aed73cd2c465af6f7252b6d7e8df
/*
 *  linux/arch/x86-64/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL(idle_notifier_unregister);
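/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * wanting idle callbacks registers a notifier_block whose callback
 * receives IDLE_START/IDLE_END as the event value, e.g.:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		if (event == IDLE_START)
 *			;	// this CPU is entering idle
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *	idle_notifier_register(&my_idle_nb);
 */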
enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
void enter_idle(void)
{
	__get_cpu_var(idle_state) = CPU_IDLE;
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	if (current->pid | read_pda(irqcount))
		return;
	__exit_idle();
}
/*
 * We use this if we don't have any better
 * idle routine..
 */
static void default_idle(void)
{
	local_irq_enable();

	current_thread_info()->status &= ~TS_POLLING;
	smp_mb__after_clear_bit();
	while (!need_resched()) {
		local_irq_disable();
		if (!need_resched())
			safe_halt();
		else
			local_irq_enable();
	}
	current_thread_info()->status |= TS_POLLING;
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();

	asm volatile(
		"2:"
		"testl %0,%1;"
		"rep; nop;"
		"je 2b;"
		: :
		"i" (_TIF_NEED_RESCHED),
		"m" (current_thread_info()->flags));
}
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) &&
					!per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
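/*
 * In short: cpu_idle_wait() flags every online CPU in cpu_idle_state,
 * then sleeps until each CPU's idle loop (see cpu_idle() below) has
 * cleared its own flag, i.e. until every CPU has gone through the idle
 * routine at least once. Callers use this when replacing pm_idle so
 * the old handler is guaranteed to be out of use.
 */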
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
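/*
 * The CPU_DEAD store above is one half of the hotplug handshake: the
 * CPU tearing this one down is expected to poll per_cpu(cpu_state)
 * for CPU_DEAD (in the arch __cpu_die() path) before treating the
 * processor as offline. That is an assumption about the hotplug code
 * elsewhere in the tree, not something enforced in this file.
 */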
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			enter_idle();
			idle();
			__exit_idle();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
	local_irq_enable();

	while (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (need_resched())
			break;
		__mwait(0, 0);
	}
}
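/*
 * To make the sequence concrete: __monitor() arms address monitoring
 * on the cacheline holding thread_info->flags, and __mwait() idles
 * until that line is written (e.g. by set_tsk_need_resched() from a
 * remote CPU) or an interrupt arrives. The smp_mb() plus the
 * need_resched() recheck covers the window where the flag was already
 * set before MONITOR was armed.
 */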
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int printed;
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait
		 */
		if (!pm_idle) {
			if (!printed) {
				printk("using mwait in idle threads.\n");
				printed = 1;
			}
			pm_idle = mwait_idle;
		}
	}
}
static int __init idle_setup(char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}

__setup("idle=", idle_setup);
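/*
 * Usage: booting with "idle=poll" on the kernel command line installs
 * poll_idle() as pm_idle, so select_idle_routine() above will not
 * replace it with mwait_idle(); boot_option_idle_override additionally
 * tells other idle-setup code (ACPI's, for instance) that the user
 * forced a choice.
 */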
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
		regs->eflags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->rax, regs->rbx, regs->rcx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->rdx, regs->rsi, regs->rdi);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->rbp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	asm("movq %%cr0, %0": "=r" (cr0));
	asm("movq %%cr2, %0": "=r" (cr2));
	asm("movq %%cr3, %0": "=r" (cr3));
	asm("movq %%cr4, %0": "=r" (cr4));

	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}
void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1));
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}
void flush_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

	if (t->flags & _TIF_ABI_PENDING) {
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
		if (t->flags & _TIF_IA32)
			current_thread_info()->status |= TS_COMPAT;
	}
	t->flags &= ~_TIF_DEBUG;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}
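/*
 * The shifts above reassemble the 32-bit base that the descriptor
 * stores scattered across three fields: base0 holds bits 0-15, base1
 * bits 16-23 and base2 bits 24-31 of the segment base.
 */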
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	int err;
	struct pt_regs * childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->rax = 0;
	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
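/*
 * Note: the childregs->rax = 0 assignment above is what makes
 * fork()/clone() return 0 in the child; the parent's return value
 * (the new pid) comes from do_fork() instead.
 */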
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
__kprobes struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);

	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;
		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector
			 */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;
		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);

	/* This must be here to ensure both math_state_restore() and
	   kernel_fpu_begin() work consistently.
	   And the AMD workaround requires it to be after DS reload. */
	unlazy_fpu(prev_p);
	write_pda(kernelstack,
		  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}
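/*
 * For context, a sketch of the fpu_counter's other half; it lives in
 * the trap code, not in this file, so take the details as an
 * assumption. The device-not-available handler bumps the counter each
 * time a task faults its FPU state back in, roughly:
 *
 *	asmlinkage void math_state_restore(void)
 *	{
 *		struct task_struct *me = current;
 *		...
 *		restore_fpu_checking(&me->thread.i387.fxsave);
 *		task_thread_info(me)->status |= TS_USEDFPU;
 *		me->fpu_counter++;	// timeslices with FPU use
 *	}
 *
 * Once the counter passes 5, __switch_to() above prefetches the fxsave
 * area and restores eagerly, skipping the trap on the first FPU use -
 * the "non lazy" path this patch adds.
 */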
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char * filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
	return error;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		    NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
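/*
 * The walk above assumes frame-pointer call frames:
 *
 *	fp      -> saved caller %rbp
 *	fp + 8  -> return address (rip)
 *
 * so following *fp climbs the stack, and the first rip outside
 * in_sched_functions() is reported as the wait channel. The 16-frame
 * cap bounds the walk if the chain is corrupt.
 */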
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			asm("movl %%gs,%0" : "=r" (gsindex));
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		}
		else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
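/*
 * Userspace usage sketch (illustrative only, not part of this file):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/prctl.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)some_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_GS, (unsigned long)&base);
 *
 * Note the asymmetry handled above: for ARCH_SET_* "addr" is the base
 * value itself, while for ARCH_GET_* it is a user pointer the kernel
 * writes the base to with put_user().
 */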
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = task_pt_regs(tsk);

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
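/*
 * Example: with randomization enabled, a starting sp of
 * 0x7fffffffe000 becomes sp - (0..8191), and the "& ~0xf" then rounds
 * the result down to the 16-byte alignment the ABI requires.
 */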