/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME		("acpi_processor")

#define ACPI_PROCESSOR_FILE_POWER	"power"

#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
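/*
 * Worked example of the conversion above, assuming the ACPI PM timer's
 * nominal 3.579545 MHz clock (PM_TIMER_FREQUENCY == 3579545):
 *
 *	US_TO_PM_TIMER_TICKS(100) == (100 * (3579545/1000)) / 1000
 *	                          == (100 * 3579) / 1000
 *	                          == 357 ticks  (~100us at ~3.58 ticks/us)
 *
 * The division truncates, so the C2_OVERHEAD/C3_OVERHEAD constants above
 * add a small fixed fudge (about 1us) when computing actual sleep time.
 */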
static void (*pm_idle_save)(void);
module_param(max_cstate, uint, 0644);

static unsigned int nocst = 0;
module_param(nocst, uint, 0000);
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history = (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
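/*
 * For example, with HZ == 250 the initializer above evaluates to
 * (1U << (250 / 25)) - 1 == (1U << 10) - 1 == 0x3FF: ten jiffies of
 * history, i.e. the same 40ms window as the 100 HZ and 800 HZ cases.
 */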
/* --------------------------------------------------------------------------
                              Power Management
   -------------------------------------------------------------------------- */
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int no_c2c3(struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - C2,C3 disabled."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = 1;

	return 0;
}
static struct dmi_system_id __initdata processor_power_dmi_table[] = {
	{ no_c2c3, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }},
	{ no_c2c3, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J") }},
	{},
};
static inline u32
ticks_elapsed (u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!acpi_fadt.tmr_val_ext)
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
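/*
 * Example: on a 24-bit PM timer (acpi_fadt.tmr_val_ext == 0) that has
 * wrapped between samples, t1 == 0x00FFFFF0 and t2 == 0x00000010 yield
 * ((0x00FFFFFF - 0x00FFFFF0) + 0x10) & 0x00FFFFFF == 0x1F ticks,
 * where naive subtraction would return a bogus huge value.
 */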
static void
acpi_processor_power_activate (
	struct acpi_processor		*pr,
	struct acpi_processor_cx	*new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old && (old->type == ACPI_STATE_C3)) {
		/* Disable bus master reload */
		if (new->type != ACPI_STATE_C3)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
	}

	/* Prepare to use new state. */
	if (new->type == ACPI_STATE_C3) {
		/* Enable bus master reload */
		if (!old || old->type != ACPI_STATE_C3)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
	}

	pr->power.state = new;

	return;
}
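/*
 * BM_RLD ("bus master reload") makes bus-master requests break the
 * processor out of C3, so it only needs to be set while C3 is the
 * target state; the transitions above toggle it exactly when C3 is
 * entered into or retired from use.
 */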
static void acpi_processor_idle (void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	pr = processors[_smp_processor_id()];
	if (!pr)
		return;
	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx)
		goto easy_out;
	/*
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 32)
			diff = 32;

		while (diff) {
			/* if we didn't get called, assume there was busmaster activity */
			diff--;
			if (diff)
				pr->power.bm_activity |= 0x1;
			pr->power.bm_activity <<= 1;
		}

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
				  &bm_status, ACPI_MTX_DO_NOT_LOCK);
		if (bm_status) {
			pr->power.bm_activity++;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
					  1, ACPI_MTX_DO_NOT_LOCK);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity++;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * Apply bus mastering demotion policy.  Automatically demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fall back to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if (pr->power.bm_activity & cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
	cx->usage++;

	/*
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			safe_halt();
		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;
	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Invoke C2 */
		inb(cx->address);
		/* Dummy op - must do something useless after P_LVL2 read */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Get end time (ticks) */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Re-enable interrupts */
		local_irq_enable();
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
		break;
	case ACPI_STATE_C3:
		/* Disable bus master arbitration */
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
		/* Get start time (ticks) */
		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Invoke C3 */
		inb(cx->address);
		/* Dummy op - must do something useless after P_LVL3 read */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Get end time (ticks) */
		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
		/* Enable bus master arbitration */
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
		/* Re-enable interrupts */
		local_irq_enable();
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
		break;

	default:
		local_irq_enable();
		return;
	}
	next_state = pr->power.state;
	/*
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >= cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
end:
	/*
	 * Demote if current state exceeds max_cstate
	 */
	if ((pr->power.state - pr->power.states) > max_cstate) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);

	return;
 easy_out:
	/* do C1 instead of busy loop */
	if (pm_idle_save)
		pm_idle_save();
	else
		safe_halt();
	return;
}
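/*
 * Illustrative walk-through of the policy above, using the default
 * thresholds from acpi_processor_set_power_policy(): a CPU sitting in
 * C2 is promoted to C3 only after 4 consecutive sleeps longer than
 * C2's latency_ticks, and only if bm_history shows no recent
 * bus-master activity; a single short sleep (demotion.threshold.count
 * is 1), or BM activity matching demotion.threshold.bm, drops it back
 * toward C1 on the next pass through this routine.
 */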
static int
acpi_processor_set_power_policy (
	struct acpi_processor	*pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

	if (!pr)
		return_VALUE(-EINVAL);
	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return_VALUE(-ENODEV);
	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}
	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return_VALUE(0);
}
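/*
 * Example: a processor reporting valid C1-C3 ends up with the chain
 *
 *	C1 --(10 long sleeps)--> C2 --(4 long sleeps, no BM)--> C3
 *	C3 --(1 short sleep or BM activity)--> C2 --(1 short sleep)--> C1
 *
 * since promotion.threshold.count is 10 below C2 and 4 at or above it,
 * while demotion.threshold.count is always 1.
 */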
static int acpi_processor_get_power_info_fadt (struct acpi_processor *pr)
{
	int i;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");

	if (!pr)
		return_VALUE(-EINVAL);

	if (!pr->pblk)
		return_VALUE(-ENODEV);
	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
		memset(&pr->power.states[i], 0, sizeof(struct acpi_processor_cx));
	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

	/* the C0 state only exists as a filler in our array,
	 * and all processors need to support C1 */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	pr->power.states[ACPI_STATE_C1].valid = 1;

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
	pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return_VALUE(0);
}
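/*
 * For reference, the ACPI processor block (P_BLK) used above is six
 * bytes: P_CNT occupies offsets 0-3, with the P_LVL2 and P_LVL3 entry
 * registers at offsets 4 and 5 -- hence pr->pblk + 4 and pr->pblk + 5.
 */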
static int acpi_processor_get_power_info_cst (struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int i;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *cst;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");

	if (errata.smp)
		return_VALUE(-ENODEV);

	if (nocst)
		return_VALUE(-ENODEV);
	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
		memset(&pr->power.states[i], 0, sizeof(struct acpi_processor_cx));
	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return_VALUE(-ENODEV);
	}
	cst = (union acpi_object *) buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "not enough elements in _CST\n"));
		status = -EFAULT;
		goto end;
	}
	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "count given by _CST is not valid\n"));
		status = -EFAULT;
		goto end;
	}

	/* We support up to ACPI_PROCESSOR_MAX_POWER. */
	if (count > ACPI_PROCESSOR_MAX_POWER) {
		printk(KERN_WARNING "Limiting number of power states to max (%d)\n",
		       ACPI_PROCESSOR_MAX_POWER);
		printk(KERN_WARNING "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
		count = ACPI_PROCESSOR_MAX_POWER;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = (union acpi_object *) &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = (union acpi_object *) &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *) obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
			0 : reg->address;

		/* There should be an easy way to extract an integer... */
		obj = (union acpi_object *) &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;

		if ((cx.type != ACPI_STATE_C1) &&
		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
			continue;

		if ((cx.type < ACPI_STATE_C1) ||
		    (cx.type > ACPI_STATE_C3))
			continue;

		obj = (union acpi_object *) &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = (union acpi_object *) &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		(pr->power.count)++;
		memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
	}
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", pr->power.count));

	/* Validate number of power states discovered */
	if (pr->power.count < 2)
		status = -ENODEV;

end:
	acpi_os_free(buffer.pointer);

	return_VALUE(status);
}
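/*
 * The _CST object parsed above has the following shape (ACPI 2.0):
 *
 *	Package {
 *		Count,			// number of C-state sub-packages
 *		Package {		// one per C-state
 *			Register,	// buffer: struct acpi_power_register
 *			Type,		// integer: C1=1, C2=2, C3=3
 *			Latency,	// integer: worst-case latency (us)
 *			Power		// integer: average power (mW)
 *		},
 *		...
 *	}
 *
 * which is why each element must be a 4-entry package whose first
 * entry is a buffer and whose remaining entries are integers.
 */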
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c2");

	if (!cx->address)
		return_VOID;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return_VOID;
	}

	/* We're (currently) only supporting C2 on UP */
	else if (errata.smp) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C2 not supported in SMP mode\n"));
		return_VOID;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy.
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return_VOID;
}
static void acpi_processor_power_verify_c3(
	struct acpi_processor		*pr,
	struct acpi_processor_cx	*cx)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c3");

	if (!cx->address)
		return_VOID;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return_VOID;
	}

	/* bus mastering control is necessary */
	else if (!pr->flags.bm_control) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 support requires bus mastering control\n"));
		return_VOID;
	}

	/* We're (currently) only supporting C3 on UP */
	else if (errata.smp) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported in SMP mode\n"));
		return_VOID;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return_VOID;
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
	pr->flags.bm_check = 1;

	return_VOID;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	return (working);
}
static int acpi_processor_get_power_info (
	struct acpi_processor	*pr)
{
	unsigned int i;
	int result;

	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	result = acpi_processor_get_power_info_cst(pr);
	if ((result) || (acpi_processor_power_verify(pr) < 2)) {
		result = acpi_processor_get_power_info_fadt(pr);
		if (result)
			return_VALUE(result);

		if (acpi_processor_power_verify(pr) < 2)
			return_VALUE(-ENODEV);
	}

	/*
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return_VALUE(result);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid)
			pr->power.count = i;
		if ((pr->power.states[i].valid) &&
		    (pr->power.states[i].type >= ACPI_STATE_C2))
			pr->flags.power = 1;
	}

	return_VALUE(0);
}
int acpi_processor_cst_has_changed (struct acpi_processor *pr)
{
	int result = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");

	if (!pr)
		return_VALUE(-EINVAL);

	if (errata.smp || nocst) {
		return_VALUE(-ENODEV);
	}

	if (!pr->flags.power_setup_done)
		return_VALUE(-ENODEV);

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_kernel();

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return_VALUE(result);
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
	unsigned int i;

	ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
			"max_cstate:              C%d\n"
			"bus master activity:     %08x\n",
			pr->power.state ? pr->power.state - pr->power.states : 0,
			max_cstate,
			(unsigned)pr->power.bm_activity);
	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] == pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}
		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}
		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage);
	}

end:
	return_VALUE(0);
}
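/*
 * Sample /proc output produced by the handler above (values are
 * illustrative only):
 *
 *	active state:            C2
 *	max_cstate:              C8
 *	bus master activity:     00000000
 *	states:
 *	    C1:                  type[C1] promotion[C2] demotion[--] latency[000] usage[00000120]
 *	   *C2:                  type[C2] promotion[C3] demotion[C1] latency[100] usage[00341223]
 *	    C3:                  type[C3] promotion[--] demotion[C2] latency[900] usage[00000000]
 */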
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static struct file_operations acpi_processor_power_fops = {
	.open		= acpi_processor_power_open_fs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
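/*
 * single_open() stores its third argument in seq->private, which is
 * how acpi_processor_power_seq_show() above recovers the
 * acpi_processor that acpi_processor_power_init() places in
 * entry->data below.
 */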
int acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run = 0;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	ACPI_FUNCTION_TRACE("acpi_processor_power_init");

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!errata.smp && (pr->id == 0) && acpi_fadt.cst_cnt && !nocst) {
		status = acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Notifying BIOS of _CST ability failed\n"));
		}
	}

	acpi_processor_get_power_info(pr);

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i, pr->power.states[i].type);
		printk(")\n");

		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
	}

	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Unable to create '%s' fs entry\n",
				  ACPI_PROCESSOR_FILE_POWER));
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	pr->flags.power_setup_done = 1;

	return_VALUE(0);
}
int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device)
{
	ACPI_FUNCTION_TRACE("acpi_processor_power_exit");

	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device));

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
	}

	return_VALUE(0);
}