/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")
#define ACPI_PROCESSOR_FILE_POWER       "power"
#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD                     4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4	/* 1us (3.579 ticks per us) */
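/*
 * The ACPI PM timer runs at PM_TIMER_FREQUENCY (3.579545 MHz), i.e. roughly
 * 3.58 ticks per microsecond, so US_TO_PM_TIMER_TICKS(100) yields
 * (100 * 3579) / 1000 = 357 ticks.  The C2/C3 overhead constants above
 * account for about 1us of state-transition cost in those same ticks.
 */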
static void (*pm_idle_save) (void);
module_param(max_cstate, uint, 0644);

static unsigned int nocst = 0;
module_param(nocst, uint, 0000);
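/*
 * Both parameters can also be given on the kernel command line, e.g.
 * "processor.max_cstate=1" to limit the deepest C-state (the printk in
 * set_max_cstate() below documents this override) or "processor.nocst=1"
 * to ignore _CST and fall back to FADT/P_BLK C-state discovery.
 */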
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
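/*
 * The initializer keeps roughly 40ms of history for HZ below 800: for
 * example, at HZ=250 the mask is (1U << 10) - 1 = 0x3FF, i.e. 10 jiffies
 * of 4ms each.  At HZ >= 800 the full 32-bit mask (32 jiffies) is used.
 */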
/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
               " Override with \"processor.max_cstate=%d\"\n", id->ident,
               (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}
static struct dmi_system_id __initdata processor_power_dmi_table[] = {
        {set_max_cstate, "IBM ThinkPad R40e", {
         DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
         DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
        {set_max_cstate, "Medion 41700", {
         DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
         DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J")}, (void *)1},
        {set_max_cstate, "Clevo 5600D", {
         DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
         DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
        {},
};
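/*
 * Compute the number of PM timer ticks between two reads, allowing for a
 * single wraparound: per the FADT TMR_VAL_EXT flag, the timer is either
 * 24 bits or 32 bits wide.
 */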
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return (t2 - t1);
        else if (!acpi_fadt.tmr_val_ext)
                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return ((0xFFFFFFFF - t1) + t2);
}
static void
acpi_processor_power_activate(struct acpi_processor *pr,
                              struct acpi_processor_cx *new)
{
        struct acpi_processor_cx *old;

        if (!pr || !new)
                return;

        old = pr->power.state;

        if (old)
                old->promotion.count = 0;
        new->demotion.count = 0;

        /* Cleanup from old state. */
        if (old) {
                switch (old->type) {
                case ACPI_STATE_C3:
                        /* Disable bus master reload */
                        if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
                                                  ACPI_MTX_DO_NOT_LOCK);
                        break;
                }
        }

        /* Prepare to use new state. */
        switch (new->type) {
        case ACPI_STATE_C3:
                /* Enable bus master reload */
                if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1,
                                          ACPI_MTX_DO_NOT_LOCK);
                break;
        }

        pr->power.state = new;

        return;
}
static void acpi_safe_halt(void)
{
        int polling = test_thread_flag(TIF_POLLING_NRFLAG);

        if (polling) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
        }
        if (!need_resched())
                safe_halt();
        if (polling)
                set_thread_flag(TIF_POLLING_NRFLAG);
}
static atomic_t c3_cpu_count;
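/*
 * The idle handler installed as pm_idle: sample bus-master activity,
 * enter the current Cx state, time the residency with the PM timer, and
 * then apply the promotion/demotion policy to pick the next state.
 * c3_cpu_count above tracks how many CPUs are in C3 simultaneously, so
 * bus-master arbitration is only disabled once every online CPU has
 * entered C3.
 */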
static void acpi_processor_idle(void)
{
        struct acpi_processor *pr = NULL;
        struct acpi_processor_cx *cx = NULL;
        struct acpi_processor_cx *next_state = NULL;
        int sleep_ticks = 0;
        u32 t1, t2 = 0;

        pr = processors[smp_processor_id()];
        if (!pr)
                return;

        /*
         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.
         */
        local_irq_disable();

        /*
         * Check whether we truly need to go idle, or should
         * reschedule:
         */
        if (unlikely(need_resched())) {
                local_irq_enable();
                return;
        }

        cx = pr->power.state;

        /*
         * Check BM Activity
         * -----------------
         * Check for bus mastering activity (if required), record, and check
         * for demotion.
         */
        if (pr->flags.bm_check) {
                u32 bm_status = 0;
                unsigned long diff = jiffies - pr->power.bm_check_timestamp;

                if (diff > 32)
                        diff = 32;

                while (diff) {
                        /* if we didn't get called, assume there was busmaster activity */
                        diff--;
                        if (diff)
                                pr->power.bm_activity |= 0x1;
                        pr->power.bm_activity <<= 1;
                }

                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                  &bm_status, ACPI_MTX_DO_NOT_LOCK);
                if (bm_status) {
                        pr->power.bm_activity++;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                          1, ACPI_MTX_DO_NOT_LOCK);
                }
                /*
                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
                 * the true state of bus mastering activity; forcing us to
                 * manually check the BMIDEA bit of each IDE channel.
                 */
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                                pr->power.bm_activity++;
                }

                pr->power.bm_check_timestamp = jiffies;

                /*
                 * Apply bus mastering demotion policy.  Automatically demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
                 * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fallback to the demotion
                 *      state (use it for this quantum only) instead of
                 *      demoting -- and rely on duration as our sole demotion
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                 */
                if (pr->power.bm_activity & cx->demotion.threshold.bm) {
                        local_irq_enable();
                        next_state = cx->demotion.state;
                        goto end;
                }
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system. We do it here instead of doing it at _CST/P_LVL
         * detection phase, to work cleanly with logical CPU hotplug.
         */
        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && acpi_fadt.plvl2_up)
                cx->type = ACPI_STATE_C1;
#endif

        /*
         * Sleep:
         * ------
         * Invoke the current Cx state to put the processor to sleep.
         */
        switch (cx->type) {

        case ACPI_STATE_C1:
                /*
                 * Invoke C1.
                 * Use the appropriate idle routine, the one that would
                 * be used without acpi C-states.
                 */
                if (pm_idle_save)
                        pm_idle_save();
                else
                        acpi_safe_halt();

                /*
                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
                 */
                sleep_ticks = 0xFFFFFFFF;
                break;

        case ACPI_STATE_C2:
                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C2 */
                inb(cx->address);
                /* Dummy op - must do something useless after P_LVL2 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Re-enable interrupts */
                local_irq_enable();
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
                break;

        case ACPI_STATE_C3:

                if (pr->flags.bm_check) {
                        if (atomic_inc_return(&c3_cpu_count) ==
                            num_online_cpus()) {
                                /*
                                 * All CPUs are trying to go to C3
                                 * Disable bus master arbitration
                                 */
                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
                                                  ACPI_MTX_DO_NOT_LOCK);
                        }
                } else {
                        /* SMP with no shared cache... Invalidate cache */
                        ACPI_FLUSH_CPU_CACHE();
                }

                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C3 */
                inb(cx->address);
                /* Dummy op - must do something useless after P_LVL3 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                if (pr->flags.bm_check) {
                        /* Enable bus master arbitration */
                        atomic_dec(&c3_cpu_count);
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
                                          ACPI_MTX_DO_NOT_LOCK);
                }

                /* Re-enable interrupts */
                local_irq_enable();
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
                break;

        default:
                local_irq_enable();
                return;
        }

        next_state = pr->power.state;

        /*
         * Promotion?
         * ----------
         * Track the number of long sleeps (time asleep is greater than
         * threshold) and promote when the count threshold is reached.  Note
         * that bus mastering activity may prevent promotions.
         * Do not promote above max_cstate.
         */
        if (cx->promotion.state &&
            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
                if (sleep_ticks > cx->promotion.threshold.ticks) {
                        cx->promotion.count++;
                        cx->demotion.count = 0;
                        if (cx->promotion.count >=
                            cx->promotion.threshold.count) {
                                if (pr->flags.bm_check) {
                                        if (!(pr->power.bm_activity &
                                              cx->promotion.threshold.bm)) {
                                                next_state =
                                                    cx->promotion.state;
                                                goto end;
                                        }
                                } else {
                                        next_state = cx->promotion.state;
                                        goto end;
                                }
                        }
                }
        }

        /*
         * Demotion?
         * ---------
         * Track the number of short sleeps (time asleep is less than time
         * threshold) and demote when the usage threshold is reached.
         */
        if (cx->demotion.state) {
                if (sleep_ticks < cx->demotion.threshold.ticks) {
                        cx->demotion.count++;
                        cx->promotion.count = 0;
                        if (cx->demotion.count >= cx->demotion.threshold.count) {
                                next_state = cx->demotion.state;
                                goto end;
                        }
                }
        }

      end:
        /*
         * Demote if current state exceeds max_cstate
         */
        if ((pr->power.state - pr->power.states) > max_cstate) {
                if (cx->demotion.state)
                        next_state = cx->demotion.state;
        }

        /*
         * New Cx State?
         * -------------
         * If we're going to start using a new Cx state we must clean up
         * from the previous and prepare to use the new.
         */
        if (next_state != pr->power.state)
                acpi_processor_power_activate(pr, next_state);

        return;
}
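/*
 * Default policy in brief, as set up below: each state demotes after a
 * single too-short sleep; C1 promotes to C2 after 10 sufficiently long
 * sleeps, C2 promotes to C3 after 4, and recent bus-master activity
 * (tracked against bm_history) vetoes promotion into C3.
 */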
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int state_is_set = 0;
        struct acpi_processor_cx *lower = NULL;
        struct acpi_processor_cx *higher = NULL;
        struct acpi_processor_cx *cx;

        ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

        if (!pr)
                return_VALUE(-EINVAL);

        /*
         * This function sets the default Cx state policy (OS idle handler).
         * Our scheme is to promote quickly to C2 but more conservatively
         * to C3.  We're favoring C2 for its characteristics of low latency
         * (quick response), good power savings, and ability to allow bus
         * mastering activity.  Note that the Cx state policy is completely
         * customizable and can be altered dynamically.
         */

        /* startup state */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (!state_is_set)
                        pr->power.state = cx;
                state_is_set++;
                break;
        }

        if (!state_is_set)
                return_VALUE(-ENODEV);

        /* demotion */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (lower) {
                        cx->demotion.state = lower;
                        cx->demotion.threshold.ticks = cx->latency_ticks;
                        cx->demotion.threshold.count = 1;
                        if (cx->type == ACPI_STATE_C3)
                                cx->demotion.threshold.bm = bm_history;
                }

                lower = cx;
        }

        /* promotion */
        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (higher) {
                        cx->promotion.state = higher;
                        cx->promotion.threshold.ticks = cx->latency_ticks;
                        if (cx->type >= ACPI_STATE_C2)
                                cx->promotion.threshold.count = 4;
                        else
                                cx->promotion.threshold.count = 10;
                        if (higher->type == ACPI_STATE_C3)
                                cx->promotion.threshold.bm = bm_history;
                }

                higher = cx;
        }

        return_VALUE(0);
}
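/*
 * FADT/P_BLK-based discovery: per the ACPI spec, the P_LVL2 register lives
 * at P_BLK offset 4 and P_LVL3 at offset 5, with worst-case latencies
 * advertised in the FADT (P_LVL2_LAT/P_LVL3_LAT).
 */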
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");

        if (!pr)
                return_VALUE(-EINVAL);

        if (!pr->pblk)
                return_VALUE(-ENODEV);

        memset(pr->power.states, 0, sizeof(pr->power.states));

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

        /* the C0 state only exists as a filler in our array,
         * and all processors need to support C1 */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        pr->power.states[ACPI_STATE_C1].valid = 1;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.
         */
        if ((num_online_cpus() > 1) && acpi_fadt.plvl2_up)
                return_VALUE(-ENODEV);
#endif

        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
        pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return_VALUE(0);
}
static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
{
        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");

        memset(pr->power.states, 0, sizeof(pr->power.states));

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

        /* the C0 state only exists as a filler in our array,
         * and all processors need to support C1 */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        pr->power.states[ACPI_STATE_C1].valid = 1;

        return_VALUE(0);
}
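/*
 * _CST-based discovery: the _CST package holds a count followed by one
 * 4-element package per C-state -- {register buffer, type, latency,
 * power} -- each of which is validated element by element below.
 */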
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        acpi_status status = 0;
        acpi_integer count;
        int i;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");

        if (nocst)
                return_VALUE(-ENODEV);

        pr->power.count = 0;
        for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
                memset(&(pr->power.states[i]), 0,
                       sizeof(struct acpi_processor_cx));

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return_VALUE(-ENODEV);
        }

        cst = (union acpi_object *)buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "not enough elements in _CST\n"));
                status = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "count given by _CST is not valid\n"));
                status = -EFAULT;
                goto end;
        }

        /* We support up to ACPI_PROCESSOR_MAX_POWER. */
        if (count > ACPI_PROCESSOR_MAX_POWER) {
                printk(KERN_WARNING
                       "Limiting number of power states to max (%d)\n",
                       ACPI_PROCESSOR_MAX_POWER);
                printk(KERN_WARNING
                       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                count = ACPI_PROCESSOR_MAX_POWER;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = (union acpi_object *)&(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = (union acpi_object *)&(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
                    0 : reg->address;

                /* There should be an easy way to extract an integer... */
                obj = (union acpi_object *)&(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;

                if ((cx.type != ACPI_STATE_C1) &&
                    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
                        continue;

                if ((cx.type < ACPI_STATE_C1) || (cx.type > ACPI_STATE_C3))
                        continue;

                obj = (union acpi_object *)&(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = (union acpi_object *)&(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.power = obj->integer.value;

                (pr->power.count)++;
                memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
                          pr->power.count));

        /* Validate number of power states discovered */
        if (pr->power.count < 2)
                status = -EFAULT;

      end:
        acpi_os_free(buffer.pointer);

        return_VALUE(status);
}
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
        ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2");

        if (!cx->address)
                return_VOID;

        /*
         * C2 latency must be less than or equal to 100
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return_VOID;
        }

        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return_VOID;
}
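/*
 * C3 has extra requirements beyond latency: no PIIX4 Type-F DMA, and
 * either hardware bus-master control (when bm_check is needed) or a
 * working WBINVD as advertised by the FADT.
 */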
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3");

        if (!cx->address)
                return_VOID;

        /*
         * C3 latency must be less than or equal to 1000
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return_VOID;
        }

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return_VOID;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if (!bm_check_flag) {
                /* Determine whether bm_check is needed based on CPU */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
        } else {
                pr->flags.bm_check = bm_check_flag;
        }

        if (pr->flags.bm_check) {
                /* bus mastering control is necessary */
                if (!pr->flags.bm_control) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "C3 support requires bus mastering control\n"));
                        return_VOID;
                }
        } else {
                /*
                 * WBINVD should be set in fadt for C3 to be
                 * supported when bm_check is not required.
                 */
                if (acpi_fadt.wb_invd != 1) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
                        return_VOID;
                }
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
                                  0, ACPI_MTX_DO_NOT_LOCK);
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return_VOID;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        acpi_processor_power_verify_c2(cx);
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        break;
                }

                if (cx->valid)
                        working++;
        }

        return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

        /* NOTE: the idle thread may not be running while calling
         * this function */

        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                result = acpi_processor_get_power_info_fadt(pr);

        if ((result) || (acpi_processor_power_verify(pr) < 2))
                result = acpi_processor_get_power_info_default_c1(pr);

        /*
         * Set Default Policy
         * ------------------
         * Now that we know which states are supported, set the default
         * policy.  Note that this policy can be changed dynamically
         * (e.g. encourage deeper sleeps to conserve battery life when
         * not on AC).
         */
        result = acpi_processor_set_power_policy(pr);
        if (result)
                return_VALUE(result);

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        if (pr->power.states[i].type >= ACPI_STATE_C2)
                                pr->flags.power = 1;
                }
        }

        return_VALUE(0);
}
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
        int result = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");

        if (!pr)
                return_VALUE(-EINVAL);

        if (nocst)
                return_VALUE(-ENODEV);

        if (!pr->flags.power_setup_done)
                return_VALUE(-ENODEV);

        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
        synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

        pr->flags.power = 0;
        result = acpi_processor_get_power_info(pr);
        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
                pm_idle = acpi_processor_idle;

        return_VALUE(result);
}
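/*
 * /proc/acpi/processor/CPUn/power: a summary block (active state, the
 * max_cstate limit, recent bus-master activity) followed by one line per
 * discovered C-state with its type, promotion/demotion targets, latency,
 * and usage count.
 */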
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        unsigned int i;

        ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

        if (!pr)
                goto end;

        seq_printf(seq, "active state:            C%zd\n"
                   "max_cstate:              C%d\n"
                   "bus master activity:     %08x\n",
                   pr->power.state ? pr->power.state - pr->power.states : 0,
                   max_cstate, (unsigned)pr->power.bm_activity);

        seq_puts(seq, "states:\n");

        for (i = 1; i <= pr->power.count; i++) {
                seq_printf(seq, "   %cC%d:                  ",
                           (&pr->power.states[i] ==
                            pr->power.state ? '*' : ' '), i);

                if (!pr->power.states[i].valid) {
                        seq_puts(seq, "<not supported>\n");
                        continue;
                }

                switch (pr->power.states[i].type) {
                case ACPI_STATE_C1:
                        seq_printf(seq, "type[C1] ");
                        break;
                case ACPI_STATE_C2:
                        seq_printf(seq, "type[C2] ");
                        break;
                case ACPI_STATE_C3:
                        seq_printf(seq, "type[C3] ");
                        break;
                default:
                        seq_printf(seq, "type[--] ");
                        break;
                }

                if (pr->power.states[i].promotion.state)
                        seq_printf(seq, "promotion[C%zd] ",
                                   (pr->power.states[i].promotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "promotion[--] ");

                if (pr->power.states[i].demotion.state)
                        seq_printf(seq, "demotion[C%zd] ",
                                   (pr->power.states[i].demotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "demotion[--] ");

                seq_printf(seq, "latency[%03d] usage[%08d]\n",
                           pr->power.states[i].latency,
                           pr->power.states[i].usage);
        }

      end:
        return_VALUE(0);
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_power_seq_show,
                           PDE(inode)->data);
}

static struct file_operations acpi_processor_power_fops = {
        .open = acpi_processor_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
int acpi_processor_power_init(struct acpi_processor *pr,
                              struct acpi_device *device)
{
        acpi_status status = 0;
        static int first_run = 0;
        struct proc_dir_entry *entry = NULL;
        unsigned int i;

        ACPI_FUNCTION_TRACE("acpi_processor_power_init");

        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
                if (max_cstate < ACPI_C_STATES_MAX)
                        printk(KERN_NOTICE
                               "ACPI: processor limited to max C-state %d\n",
                               max_cstate);
                first_run++;
        }

        if (!pr)
                return_VALUE(-EINVAL);

        if (acpi_fadt.cst_cnt && !nocst) {
                status =
                    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
                if (ACPI_FAILURE(status)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                          "Notifying BIOS of _CST ability failed\n"));
                }
        }

        acpi_processor_power_init_pdc(&(pr->power), pr->id);
        acpi_processor_set_pdc(pr, pr->power.pdc);
        acpi_processor_get_power_info(pr);

        /*
         * Install the idle handler if processor power management is supported.
         * Note that the previously set idle handler will be used on
         * platforms that only support C1.
         */
        if ((pr->flags.power) && (!boot_option_idle_override)) {
                printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
                for (i = 1; i <= pr->power.count; i++)
                        if (pr->power.states[i].valid)
                                printk(" C%d[C%d]", i,
                                       pr->power.states[i].type);
                printk(")\n");

                if (pr->id == 0) {
                        pm_idle_save = pm_idle;
                        pm_idle = acpi_processor_idle;
                }
        }

        /* 'power' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  S_IRUGO, acpi_device_dir(device));
        if (!entry)
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Unable to create '%s' fs entry\n",
                                  ACPI_PROCESSOR_FILE_POWER));
        else {
                entry->proc_fops = &acpi_processor_power_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        pr->flags.power_setup_done = 1;

        return_VALUE(0);
}
int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device)
{
        ACPI_FUNCTION_TRACE("acpi_processor_power_exit");

        pr->flags.power_setup_done = 0;

        if (acpi_device_dir(device))
                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  acpi_device_dir(device));

        /* Unregister the idle handler when processor #0 is removed. */
        if (pr->id == 0) {
                pm_idle = pm_idle_save;

                /*
                 * We are about to unload the current idle thread pm callback
                 * (pm_idle).  Wait for all processors to update cached/local
                 * copies of pm_idle before proceeding.