/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/latency.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif
#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
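/*
 * For reference (a worked example, added commentary): the ACPI PM timer
 * runs at PM_TIMER_FREQUENCY = 3579545 Hz, i.e. roughly 3.579 ticks per
 * microsecond, so US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357
 * ticks, and PM_TIMER_TICK_NS comes out to about 279 ns per tick.
 */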
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
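/*
 * Worked example (illustrative, added commentary): with HZ = 250 the
 * expression above yields (1U << 10) - 1 = 0x3FF, i.e. a 10-jiffy (40ms)
 * window of bus-master history; with HZ >= 800 it saturates to the full
 * 32-bit mask (32 jiffies).
 */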
static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#endif
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
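/*
 * Wraparound example (illustrative, added commentary): a 24-bit PM timer
 * wraps at 0x00FFFFFF, so with t1 = 0x00FFFFF0 and t2 = 0x00000010 the
 * helpers above return ((0x00FFFFFF - 0x00FFFFF0) + 0x10) & 0x00FFFFFF =
 * 0x1F ticks; a 32-bit timer takes the final branch instead.
 */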
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;
}
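/*
 * Ordering note (added commentary): clearing TS_POLLING before the
 * need_resched() test above closes a race with the scheduler, which skips
 * sending a reschedule IPI to CPUs it believes are polling; the smp_mb()
 * ensures the flag change is visible before this CPU commits to halting.
 */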
static atomic_t c3_cpu_count;
/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

#endif /* !CONFIG_CPU_IDLE */
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
#else
	cpumask_t mask = cpumask_of_cpu(pr->id);

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
	else
		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
#endif
}
/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
#endif
}
#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = processors[smp_processor_id()];
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		if (irqs_disabled())
			local_irq_enable();
		return;
	}
	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif
	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		if (irqs_disabled())
			local_irq_enable();

		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C2, so notify users */
		mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
	case ACPI_STATE_C3:
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C3, so notify users */
		mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif
	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <= system_latency_constraint()) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency > system_latency_constraint()) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
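/*
 * Illustrative walk-through (added commentary, not from the original
 * source): with the default policy set up below, a CPU sitting in C2 is
 * promoted to C3 only after 4 consecutive residencies longer than its
 * promotion threshold with no bus-master activity recorded in the
 * bm_history window, while a single residency shorter than the demotion
 * threshold is enough to demote it back toward C1.
 */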
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}

#endif /* !CONFIG_CPU_IDLE */
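/*
 * Example of the resulting policy chain (illustrative, added commentary):
 * if C1, C2 and C3 are all valid, the loops above link
 * C2.demotion.state -> C1, C3.demotion.state -> C2, and in the other
 * direction C1.promotion.state -> C2, C2.promotion.state -> C3, so
 * acpi_processor_idle() can step exactly one state up or down per
 * policy decision.
 */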
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.space_id = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.space_id = ACPI_CSTATE_FFH;
			} else if (cx.type != ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * That is, we retain space_id of SYSTEM_IO for
				 * halt based C1.
				 * Otherwise, ignore this info and continue.
				 */
				continue;
			}
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
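/*
 * For reference, the _CST package parsed above has this shape per the ACPI
 * spec (sketch added for clarity, not from the original source):
 *
 *   Package { count,                    // number of C-state entries
 *             Package { Register,      // I/O port or FFH entry method
 *                       type,          // 1 = C1, 2 = C2, 3 = C3
 *                       latency,       // worst-case exit latency, in us
 *                       power }, ... } // average power consumption, in mW
 */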
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   system_latency_constraint());

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
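/*
 * Sample /proc/acpi/processor/CPU0/power output as produced by the
 * seq_show above (all values illustrative only):
 *
 *   active state:            C2
 *   max_cstate:              C8
 *   bus master activity:     00000000
 *   maximum allowed latency: 2000 usec
 *   states:
 *       C1:  type[C1] promotion[C2] demotion[--] latency[000] usage[...] ...
 *      *C2:  type[C2] promotion[--] demotion[C1] latency[001] usage[...] ...
 */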
#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif
#else /* CONFIG_CPU_IDLE */
/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	return 0;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_state_timer_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	mark_tsc_unstable("TSC halts in idle");
#endif

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += ticks_elapsed(t1, t2);
	return ticks_elapsed_in_us(t1, t2);
}
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (acpi_idle_bm_check()) {
		cx = pr->power.bm_state;

		acpi_idle_update_bm_rld(pr, cx);

		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		acpi_idle_do_entry(cx);
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	} else {
		acpi_idle_update_bm_rld(pr, cx);

		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);

		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		acpi_idle_do_entry(cx);
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

		spin_lock(&c3_lock);
		/* Re-enable bus master arbitration */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	mark_tsc_unstable("TSC halts in idle");
#endif

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += ticks_elapsed(t1, t2);
	return ticks_elapsed_in_us(t1, t2);
}
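/*
 * Note on the c3_lock protocol above (added commentary): the last CPU to
 * enter C3 sets ARB_DIS, freezing bus-master arbitration, and the first
 * CPU to wake clears it again before decrementing c3_cpu_count, so
 * arbitration is only disabled while every online CPU is in C3.
 */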
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = 0;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * 6;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			state->enter = acpi_idle_enter_c1;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	/* find the deepest state that can handle active BM */
	if (pr->flags.bm_check) {
		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
			if (pr->power.states[i].type == ACPI_STATE_C3)
				break;
		pr->power.bm_state = &pr->power.states[i - 1];
	}

	return 0;
}
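/*
 * Note on target_residency (added commentary): the factor of 6 above is a
 * heuristic, e.g. a C3 state with a 100us exit latency is only offered to
 * the governor when the CPU expects to stay idle for at least 600us, since
 * shorter residencies would spend a disproportionate share of the idle
 * period on the entry/exit transition itself.
 */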
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
		register_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE
	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		unregister_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}