/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>

#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static s64 us_to_pm_timer_ticks(s64 t)
{
	return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
}

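/*
 * Note (editor): PM_TIMER_FREQUENCY is the ACPI power management timer
 * rate, 3.579545 MHz per the ACPI spec, so one tick is roughly 279 ns.
 * Worked example for the two conversions above:
 *   PM_TIMER_TICKS_TO_US(3579) = (3579 * 1000) / 3579      ~= 1000 us
 *   us_to_pm_timer_ticks(1000) = (1000 * 3579545) / 1000000 ~= 3580 ticks
 */
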
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
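/*
 * Note (editor): acpi_safe_halt() takes part in the scheduler's idle
 * wakeup protocol. Clearing TS_POLLING advertises that this CPU must be
 * woken by an IPI rather than by a polled flag; the smp_mb() before the
 * need_resched() recheck closes the race where a task becomes runnable
 * between the flag update and the HLT instruction.
 */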
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has(X86_FEATURE_AMDC1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

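/*
 * Note (editor): timer_broadcast_on_state is initialized to INT_MAX in
 * acpi_processor_power_verify() and lowered here to the index of the
 * shallowest C-state in which the local APIC timer stops; every state at
 * or beyond that index then needs the broadcast clockevent instead.
 */
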
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;
static u32 saved_bm_rld;

static void acpi_idle_bm_rld_save(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
}
static void acpi_idle_bm_rld_restore(void)
{
	u32 resumed_bm_rld;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);

	if (resumed_bm_rld != saved_bm_rld)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

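/*
 * Note (editor, hedged): BM_RLD is saved across suspend because firmware
 * can reprogram it during S3; restoring the saved value on resume keeps
 * bus-master-initiated wakeup from C3 behaving as it did before suspend.
 */
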
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	if (acpi_idle_suspend == 1)
		return 0;

	acpi_idle_bm_rld_save();
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	if (acpi_idle_suspend == 0)
		return 0;

	acpi_idle_bm_rld_restore();
	acpi_idle_suspend = 0;
	return 0;
}

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

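/*
 * Note (editor): the +4/+5 offsets above follow the ACPI processor
 * register block layout: P_CNT occupies bytes 0-3 of P_BLK, the P_LVL2
 * port sits at byte 4 and the P_LVL3 port at byte 5; reading those I/O
 * ports is what enters C2/C3 on FADT-style systems.
 */
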
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;
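
	/*
	 * Note (editor): per the ACPI spec, _CST returns a package of the
	 * form { Count, Package(){ Register, Type, Latency, Power }, ... },
	 * which is why element 0 is read as the state count below and each
	 * per-state package is expected to have exactly four elements.
	 */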

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;

	return;
}

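/*
 * Note (editor): the 100 us bound mirrors the FADT definition of C2
 * latency, where a reported worst-case latency above 100 us means the
 * platform does not actually support C2.
 */
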
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}
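
	/*
	 * Note (editor): as with C2, the 1000 us cutoff follows the FADT
	 * C3 latency semantics; a larger reported value means C3 is not
	 * usable on this platform.
	 */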

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;
	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}

		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
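	/*
	 * Note (editor): BM_STS is a sticky status bit in PM1_STS; like
	 * all ACPI status bits it is cleared by writing 1 to it, which is
	 * what the write above does after a bus-master event was seen.
	 */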
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	ktime_t kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return 0;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	local_irq_enable();
	cx->usage++;
	lapic_timer_state_broadcast(pr, cx, 0);

	return idle_time;
}

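/*
 * Note (editor): the value returned by these ->enter handlers is the
 * measured residency in microseconds; the cpuidle governor uses it to
 * judge how well its state-selection predictions are working.
 */
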
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	sleep_ticks = us_to_pm_timer_ticks(idle_time);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return idle_time;
}

static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

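/*
 * Note (editor): c3_cpu_count tracks, under c3_lock, how many CPUs are
 * currently inside acpi_idle_enter_bm(). Bus master arbitration is only
 * disabled (ARB_DIS) once every online CPU has entered C3, since turning
 * arbitration off while any CPU still runs would stall DMA traffic.
 */
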
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;


	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	sleep_ticks = us_to_pm_timer_ticks(idle_time);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return idle_time;
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;
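
	/*
	 * Note (editor): states are filled in starting at
	 * CPUIDLE_DRIVER_STATE_START because, on x86, cpuidle reserves
	 * slot 0 for its busy-wait poll state; the first real ACPI
	 * C-state therefore lands at index 1.
	 */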
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is
	 * supported. Note that the previously set idle handler will be
	 * used on platforms that only support C1.
	 */
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

	cpuidle_unregister_device(&pr->power.dev);
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

	return 0;
}