#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <trace/power.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/ds.h>
unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

DEFINE_TRACE(power_start);
DEFINE_TRACE(power_end);
/*
 * Duplicate the extended FPU state on fork; the xstate area must stay
 * 16-byte aligned for FXSAVE/FXRSTOR, which the WARN_ON below checks.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        *dst = *src;
        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                WARN_ON((unsigned long)dst->thread.xstate & 15);
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }
        return 0;
}
void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
}
void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}
void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC, NULL);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }

        ds_exit_thread(current);
}
void flush_thread(void)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_64
        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
                        clear_tsk_thread_flag(tsk, TIF_IA32);
                } else {
                        set_tsk_thread_flag(tsk, TIF_IA32);
                        current_thread_info()->status |= TS_COMPAT;
                }
        }
#endif

        clear_tsk_thread_flag(tsk, TIF_DEBUG);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
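/*
 * Usage sketch (illustrative): userspace drives the two helpers above
 * through prctl(2), e.g.
 *
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);       - RDTSC now raises SIGSEGV
 *      prctl(PR_SET_TSC, PR_TSC_ENABLE);        - RDTSC is allowed again
 *      prctl(PR_GET_TSC, (unsigned long)&mode); - reads the current mode
 */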
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
                ds_switch_to(prev_p, next_p);
        else if (next->debugctlmsr != prev->debugctlmsr)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg0, 0);
                set_debugreg(next->debugreg1, 1);
                set_debugreg(next->debugreg2, 2);
                set_debugreg(next->debugreg3, 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg6, 6);
                set_debugreg(next->debugreg7, 7);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
}
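/*
 * Illustrative example: a task that was granted port access with, say,
 * ioperm(0x378, 4, 1) carries a partial I/O bitmap; the memcpy above
 * installs it in the TSS when that task is switched in, and the 0xff
 * memset revokes the ports again once a task without a bitmap follows.
 */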
int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                struct power_trace it;

                trace_power_start(&it, POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(&it);
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
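/*
 * Usage sketch (illustrative): switching the idle handler is a
 * two-step pattern - publish the new routine, then flush the old one:
 *
 *      pm_idle = my_new_idle;  - any hypothetical replacement routine
 *      cpu_idle_wait();        - no CPU uses the old handler afterwards
 */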
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
        if (!need_resched()) {
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
        trace_power_end(&it);
}
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        struct power_trace it;
        if (!need_resched()) {
                trace_power_start(&it, POWER_CSTATE, 1);
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(&it);
        } else
                local_irq_enable();
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(&it);
}
/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;
#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0
static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check, whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * edx enumerates MONITOR/MWAIT extensions. Check, whether
         * C1 supports MWAIT
         */
        return (edx & MWAIT_EDX_C1);
}
/*
 * Check for AMD CPUs, which have potentially C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_AMD)
                return 0;

        if (c->x86 < 0x0F)
                return 0;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0f && c->x86_model < 0x40)
                return 0;

        return 1;
}
static cpumask_var_t c1e_mask;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        if (c1e_mask != NULL)
                cpumask_clear_cpu(cpu, c1e_mask);
}
/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, c1e_mask)) {
                        cpumask_set_cpu(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere. Needs
                         * to run with interrupts enabled as it uses
                         * smp_call_function.
                         */
                        local_irq_enable();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                        local_irq_disable();
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}
void __init init_c1e_mask(void)
{
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
        if (pm_idle == c1e_idle) {
                alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
                cpumask_clear(c1e_mask);
        }
}
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option of idle=halt is added, halt is
                 * forced to be used for CPU idle. In such case CPU C2/C3
                 * won't be used again.
                 * To continue to load the CPU idle driver, don't touch
                 * the boot_option_idle_override.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option of "idle=nomwait" is added,
                 * it means that mwait will be disabled for CPU C2/C3
                 * states. In such case it won't touch the variable
                 * of boot_option_idle_override.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;

        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
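/*
 * Worked example (illustrative numbers): with mm->brk at 0x0804a000 the
 * randomized break lands in [0x0804a000, 0x0a04a000), i.e. somewhere in
 * the 32 MB above the unrandomized brk; if randomize_range() fails and
 * returns 0, the original mm->brk is kept unchanged.
 */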