ACPI: Update T-state coordination after getting _TSD info
[linux-2.6/btrfs-unstable.git] / drivers / acpi / processor_throttling.c
blob d6780f41d28c5f4cdb703a198b2e3d4b28528296
/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004       Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                          - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT                      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");
static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
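
/*
 * acpi_processor_update_tsd_coord() walks all possible CPUs and builds
 * the shared_cpu_map for every _TSD coordination domain. If any CPU's
 * domain info is missing or inconsistent, coordination is abandoned and
 * every CPU falls back to software coordination (SW_ALL) on itself.
 */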
static int acpi_processor_update_tsd_coord(void)
{
        int count, count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_t covered_cpus;
        struct acpi_processor *pr, *match_pr;
        struct acpi_tsd_package *pdomain, *match_pdomain;
        struct acpi_processor_throttling *pthrottling, *match_pthrottling;

        /*
         * Now that we have _TSD data from all CPUs, let's set up T-state
         * coordination among all CPUs.
         */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /* Basic validity check for domain info */
                pthrottling = &(pr->throttling);

                /*
                 * If the tsd package for one CPU is invalid, the coordination
                 * among all CPUs is treated as invalid.
                 */
                if (!pthrottling->tsd_valid_flag) {
                        retval = -EINVAL;
                        break;
                }
        }
        if (retval)
                goto err_ret;

        cpus_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                if (cpu_isset(i, covered_cpus))
                        continue;
                pthrottling = &pr->throttling;

                pdomain = &(pthrottling->domain_info);
                cpu_set(i, pthrottling->shared_cpu_map);
                cpu_set(i, covered_cpus);
                /*
                 * If the number of processors in the TSD domain is 1, it is
                 * unnecessary to parse the coordination for this CPU.
                 */
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                count = 1;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pthrottling = &(match_pr->throttling);
                        match_pdomain = &(match_pthrottling->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain.
                         * If two TSD packages have the same domain, they
                         * should have the same num_processors and
                         * coordination type. Otherwise it will be regarded
                         * as illegal.
                         */
                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpu_set(j, covered_cpus);
                        cpu_set(j, pthrottling->shared_cpu_map);
                        count++;
                }
                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pthrottling = &(match_pr->throttling);
                        match_pdomain = &(match_pthrottling->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /*
                         * If some CPUs have the same domain, they
                         * will have the same shared_cpu_map.
                         */
                        match_pthrottling->shared_cpu_map =
                                pthrottling->shared_cpu_map;
                }
        }

err_ret:
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /*
                 * Assume no coordination on any error parsing domain info.
                 * The coordination type will be forced to SW_ALL.
                 */
                if (retval) {
                        pthrottling = &(pr->throttling);
                        cpus_clear(pthrottling->shared_cpu_map);
                        cpu_set(i, pthrottling->shared_cpu_map);
                        pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
                }
        }

        return retval;
}
/*
 * Update the T-state coordination after the _TSD
 * data for all CPUs is obtained.
 */
void acpi_processor_throttling_init(void)
{
        if (acpi_processor_update_tsd_coord())
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "Assume no T-state coordination\n"));

        return;
}

/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long tpc = 0;

        if (!pr)
                return -EINVAL;
        status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
                }
                return -ENODEV;
        }
        pr->throttling_platform_limit = (int)tpc;
        return 0;
}
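
/*
 * Called when the platform signals a _TPC change: re-read the platform
 * throttling limit and move to a T-state that satisfies it together
 * with the current thermal and user limits.
 */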
int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
        int result = 0;
        int throttling_limit;
        int current_state;
        struct acpi_processor_limit *limit;
        int target_state;

        result = acpi_processor_get_platform_limit(pr);
        if (result) {
                /* Throttling Limit is unsupported */
                return result;
        }

        throttling_limit = pr->throttling_platform_limit;
        if (throttling_limit >= pr->throttling.state_count) {
                /* Incorrect Throttling Limit */
                return -EINVAL;
        }

        current_state = pr->throttling.state;
        if (current_state > throttling_limit) {
                /*
                 * The current state can meet the requirement of
                 * the _TPC limit. But it is reasonable that OSPM changes
                 * T-states from high to low for better performance.
                 * Of course the limit conditions of thermal
                 * and user should be considered.
                 */
                limit = &pr->limit;
                target_state = throttling_limit;
                if (limit->thermal.tx > target_state)
                        target_state = limit->thermal.tx;
                if (limit->user.tx > target_state)
                        target_state = limit->user.tx;
        } else if (current_state == throttling_limit) {
                /*
                 * Unnecessary to change the throttling state
                 */
                return 0;
        } else {
                /*
                 * If the current state is lower than the limit of _TPC, it
                 * will be forced to switch to the throttling state defined
                 * by throttling_platform_limit.
                 * Because the previous state meets the limit conditions
                 * of thermal and user, it is unnecessary to check it again.
                 */
                target_state = throttling_limit;
        }
        return acpi_processor_set_throttling(pr, target_state);
}
/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *ptc = NULL;
        union acpi_object obj = { 0 };
        struct acpi_processor_throttling *throttling;

        status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
                }
                return -ENODEV;
        }

        ptc = (union acpi_object *)buffer.pointer;
        if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
            || (ptc->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = ptc->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX
                       "Invalid _PTC data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->throttling.control_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        /*
         * status_register
         */

        obj = ptc->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->throttling.status_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        throttling = &pr->throttling;

        if ((throttling->control_register.bit_width +
             throttling->control_register.bit_offset) > 32) {
                printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
                result = -EFAULT;
                goto end;
        }

        if ((throttling->status_register.bit_width +
             throttling->status_register.bit_offset) > 32) {
                printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
                result = -EFAULT;
                goto end;
        }

end:
        kfree(buffer.pointer);

        return result;
}
/*
 * _TSS - Throttling Supported States
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tss = NULL;
        int i;

        status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
                }
                return -ENODEV;
        }

        tss = buffer.pointer;
        if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _TSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          tss->package.count));

        pr->throttling.state_count = tss->package.count;
        pr->throttling.states_tss =
            kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
                    GFP_KERNEL);
        if (!pr->throttling.states_tss) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->throttling.state_count; i++) {

                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);

                state.length = sizeof(struct acpi_processor_tx_tss);
                state.pointer = tx;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(tss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }

                if (!tx->freqpercentage) {
                        printk(KERN_ERR PREFIX
                               "Invalid _TSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }
        }

end:
        kfree(buffer.pointer);

        return result;
}
/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tsd = NULL;
        struct acpi_tsd_package *pdomain;
        struct acpi_processor_throttling *pthrottling;

        pthrottling = &pr->throttling;
        pthrottling->tsd_valid_flag = 0;

        status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
                }
                return -ENODEV;
        }

        tsd = buffer.pointer;
        if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (tsd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->throttling.domain_info);

        state.length = sizeof(struct acpi_tsd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(tsd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

        pthrottling = &pr->throttling;
        pthrottling->tsd_valid_flag = 1;
        pthrottling->shared_type = pdomain->coord_type;
        cpu_set(pr->id, pthrottling->shared_cpu_map);
        /*
         * If the coordination type is not defined in the ACPI spec,
         * the tsd_valid_flag will be cleared and the coordination type
         * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
         */
        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                pthrottling->tsd_valid_flag = 0;
                pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
        }

end:
        kfree(buffer.pointer);
        return result;
}
/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
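
/*
 * Read the current duty cycle from the legacy FADT-described throttling
 * register and convert it back into a T-state index.
 */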
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
        int state = 0;
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;

        duty_mask = pr->throttling.state_count - 1;

        duty_mask <<= pr->throttling.duty_offset;

        local_irq_disable();

        value = inl(pr->throttling.address);

        /*
         * Compute the current throttling state when throttling is enabled
         * (bit 4 is on).
         */
        if (value & 0x10) {
                duty_value = value & duty_mask;
                duty_value >>= pr->throttling.duty_offset;

                if (duty_value)
                        state = pr->throttling.state_count - duty_value;
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state is T%d (%d%% throttling applied)\n",
                          state, pr->throttling.states[state].performance));

        return 0;
}
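
/*
 * On x86, FIXED_HARDWARE throttling state lives in the
 * IA32_THERM_CONTROL MSR; these helpers wrap rdmsr/wrmsr for it.
 * Only Intel CPUs with the ACPI feature flag are handled here.
 */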
#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
                                 acpi_integer *value)
{
        struct cpuinfo_x86 *c;
        u64 msr_high, msr_low;
        unsigned int cpu;
        u64 msr = 0;
        int ret = -1;

        cpu = pr->id;
        c = &cpu_data(cpu);

        if ((c->x86_vendor != X86_VENDOR_INTEL) ||
            !cpu_has(c, X86_FEATURE_ACPI)) {
                printk(KERN_ERR PREFIX
                       "HARDWARE addr space, NOT supported yet\n");
        } else {
                msr_low = 0;
                msr_high = 0;
                rdmsr_safe(MSR_IA32_THERM_CONTROL,
                           (u32 *)&msr_low, (u32 *)&msr_high);
                msr = (msr_high << 32) | msr_low;
                *value = (acpi_integer) msr;
                ret = 0;
        }
        return ret;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
        struct cpuinfo_x86 *c;
        unsigned int cpu;
        int ret = -1;
        u64 msr;

        cpu = pr->id;
        c = &cpu_data(cpu);

        if ((c->x86_vendor != X86_VENDOR_INTEL) ||
            !cpu_has(c, X86_FEATURE_ACPI)) {
                printk(KERN_ERR PREFIX
                       "HARDWARE addr space, NOT supported yet\n");
        } else {
                msr = value;
                wrmsr_safe(MSR_IA32_THERM_CONTROL,
                           msr & 0xffffffff, msr >> 32);
                ret = 0;
        }
        return ret;
}
#else
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
                                 acpi_integer *value)
{
        printk(KERN_ERR PREFIX
               "HARDWARE addr space, NOT supported yet\n");
        return -1;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
        printk(KERN_ERR PREFIX
               "HARDWARE addr space, NOT supported yet\n");
        return -1;
}
#endif
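
/*
 * Read the raw T-state status value through whichever register type
 * _PTC declared: a SYSTEM_IO port or the FIXED_HARDWARE MSR.
 */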
static int acpi_read_throttling_status(struct acpi_processor *pr,
                                       acpi_integer *value)
{
        u32 bit_width, bit_offset;
        u64 ptc_value;
        u64 ptc_mask;
        struct acpi_processor_throttling *throttling;
        int ret = -1;

        throttling = &pr->throttling;
        switch (throttling->status_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                ptc_value = 0;
                bit_width = throttling->status_register.bit_width;
                bit_offset = throttling->status_register.bit_offset;

                acpi_os_read_port((acpi_io_address) throttling->status_register.
                                  address, (u32 *) &ptc_value,
                                  (u32) (bit_width + bit_offset));
                ptc_mask = (1 << bit_width) - 1;
                *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ret = acpi_throttling_rdmsr(pr, value);
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->status_register.space_id));
        }
        return ret;
}
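
/*
 * Write a new T-state control value through the _PTC control register,
 * the mirror image of acpi_read_throttling_status().
 */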
static int acpi_write_throttling_state(struct acpi_processor *pr,
                                       acpi_integer value)
{
        u32 bit_width, bit_offset;
        u64 ptc_value;
        u64 ptc_mask;
        struct acpi_processor_throttling *throttling;
        int ret = -1;

        throttling = &pr->throttling;
        switch (throttling->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                bit_width = throttling->control_register.bit_width;
                bit_offset = throttling->control_register.bit_offset;
                ptc_mask = (1 << bit_width) - 1;
                ptc_value = value & ptc_mask;

                acpi_os_write_port((acpi_io_address) throttling->
                                   control_register.address,
                                   (u32) (ptc_value << bit_offset),
                                   (u32) (bit_width + bit_offset));
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ret = acpi_throttling_wrmsr(pr, value);
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->control_register.space_id));
        }
        return ret;
}
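
/*
 * Translate a raw status value into its _TSS state index, or -1 if no
 * state matches.
 */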
static int acpi_get_throttling_state(struct acpi_processor *pr,
                                     acpi_integer value)
{
        int i;

        for (i = 0; i < pr->throttling.state_count; i++) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);
                if (tx->control == value)
                        break;
        }
        /* The loop leaves i == state_count when nothing matched */
        if (i >= pr->throttling.state_count)
                i = -1;
        return i;
}
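
/*
 * Look up the _TSS control value that selects the given T-state.
 */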
static int acpi_get_throttling_value(struct acpi_processor *pr,
                                     int state, acpi_integer *value)
{
        int ret = -1;

        /* state must index within states_tss[], hence '<' not '<=' */
        if (state >= 0 && state < pr->throttling.state_count) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[state]);
                *value = tx->control;
                ret = 0;
        }
        return ret;
}
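
/*
 * Read the current T-state via the _PTC status register and cache it
 * in pr->throttling.state.
 */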
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
        int state = 0;
        int ret;
        acpi_integer value;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;

        value = 0;
        ret = acpi_read_throttling_status(pr, &value);
        if (ret >= 0) {
                state = acpi_get_throttling_state(pr, value);
                pr->throttling.state = state;
        }

        return 0;
}
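
/*
 * Wrapper that temporarily migrates the calling task to the target CPU,
 * since both the legacy I/O port and the MSR must be accessed from the
 * processor being queried.
 */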
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
        cpumask_t saved_mask;
        int ret;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;
        /*
         * Migrate the task to the CPU pointed to by pr.
         */
        saved_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(pr->id));
        ret = pr->throttling.acpi_processor_get_throttling(pr);
        /* restore the previous state */
        set_cpus_allowed(current, saved_mask);

        return ret;
}
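
/*
 * Derive the legacy throttling parameters from the FADT: the number of
 * T-states from duty_width, and linear per-state performance/power
 * values in tenths of a percent.
 */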
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
        int i, step;

        if (!pr->throttling.address) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
                return -EINVAL;
        } else if (!pr->throttling.duty_width) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
                return -EINVAL;
        }
        /* TBD: Support duty_cycle values that span bit 4. */
        else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
                printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
                return -EINVAL;
        }

        pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

        /*
         * Compute state values. Note that throttling displays a linear power
         * performance relationship (at 50% performance the CPU will consume
         * 50% power). Values are in 1/10th of a percent to preserve accuracy.
         */

        step = (1000 / pr->throttling.state_count);

        for (i = 0; i < pr->throttling.state_count; i++) {
                pr->throttling.states[i].performance = 1000 - step * i;
                pr->throttling.states[i].power = 1000 - step * i;
        }
        return 0;
}
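
/*
 * Program a T-state through the legacy FADT duty-cycle mechanism:
 * throttling is disabled, the new duty value is written, then
 * throttling is re-enabled (unless state 0 was requested).
 */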
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
                                              int state)
{
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (state == pr->throttling.state)
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;
        /*
         * Calculate the duty_value and duty_mask.
         */
        if (state) {
                duty_value = pr->throttling.state_count - state;

                duty_value <<= pr->throttling.duty_offset;

                /* Used to clear all duty_value bits */
                duty_mask = pr->throttling.state_count - 1;

                duty_mask <<= acpi_gbl_FADT.duty_offset;
                duty_mask = ~duty_mask;
        }

        local_irq_disable();

        /*
         * Disable throttling by writing a 0 to bit 4. Note that we must
         * turn it off before we can change the duty_value.
         */
        value = inl(pr->throttling.address);
        if (value & 0x10) {
                value &= 0xFFFFFFEF;
                outl(value, pr->throttling.address);
        }

        /*
         * Write the new duty_value and then enable throttling. Note
         * that a state value of 0 leaves throttling disabled.
         */
        if (state) {
                value &= duty_mask;
                value |= duty_value;
                outl(value, pr->throttling.address);

                value |= 0x00000010;
                outl(value, pr->throttling.address);
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state set to T%d (%d%%)\n", state,
                          (pr->throttling.states[state].performance ? pr->
                           throttling.states[state].performance / 10 : 0)));

        return 0;
}
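
/*
 * Program a T-state through the _PTC control register using the
 * control value listed in _TSS.
 */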
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
                                             int state)
{
        int ret;
        acpi_integer value;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (state == pr->throttling.state)
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;

        value = 0;
        ret = acpi_get_throttling_value(pr, state, &value);
        if (ret >= 0) {
                acpi_write_throttling_state(pr, value);
                pr->throttling.state = state;
        }

        return 0;
}
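
/*
 * Public entry point: validate the request, migrate to the target CPU
 * and invoke the FADT- or PTC-specific set routine.
 */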
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
        cpumask_t saved_mask;
        int ret;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        /*
         * Migrate the task to the CPU pointed to by pr.
         */
        saved_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(pr->id));
        ret = pr->throttling.acpi_processor_set_throttling(pr, state);
        /* restore the previous state */
        set_cpus_allowed(current, saved_mask);
        return ret;
}
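
/*
 * One-time setup: choose between the legacy FADT mechanism and the
 * _PTC/_TSS/_TPC interface, parse _TSD, and make sure the processor
 * starts out unthrottled.
 */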
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
        int result = 0;
        struct acpi_processor_throttling *pthrottling;

        if (!pr)
                return -EINVAL;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
                          pr->throttling.address,
                          pr->throttling.duty_offset,
                          pr->throttling.duty_width));

        /*
         * Evaluate _PTC, _TSS and _TPC
         * They must all be present or none of them can be used.
         */
        if (acpi_processor_get_throttling_control(pr) ||
            acpi_processor_get_throttling_states(pr) ||
            acpi_processor_get_platform_limit(pr)) {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_fadt;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_fadt;
                if (acpi_processor_get_fadt_info(pr))
                        return 0;
        } else {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_ptc;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_ptc;
        }

        /*
         * If the _TSD package for one CPU can't be parsed successfully, it
         * means that this CPU will have no coordination with other CPUs.
         */
        if (acpi_processor_get_tsd(pr)) {
                pthrottling = &pr->throttling;
                pthrottling->tsd_valid_flag = 0;
                cpu_set(pr->id, pthrottling->shared_cpu_map);
                pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
        }

        /*
         * PIIX4 Errata: We don't support throttling on the original PIIX4.
         * This shouldn't be an issue as few (if any) mobile systems ever
         * used this part.
         */
        if (errata.piix4.throttle) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Throttling not supported on PIIX4 A- or B-step\n"));
                return 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          pr->throttling.state_count));

        pr->flags.throttling = 1;

        /*
         * Disable throttling (if enabled). We'll let subsequent policy (e.g.
         * thermal) decide to lower performance if it so chooses, but for now
         * we'll crank up the speed.
         */

        result = acpi_processor_get_throttling(pr);
        if (result)
                goto end;

        if (pr->throttling.state) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Disabling throttling (was T%d)\n",
                                  pr->throttling.state));
                result = acpi_processor_set_throttling(pr, 0);
                if (result)
                        goto end;
        }

end:
        if (result)
                pr->flags.throttling = 0;

        return result;
}
/* proc interface */

static int acpi_processor_throttling_seq_show(struct seq_file *seq,
                                              void *offset)
{
        struct acpi_processor *pr = seq->private;
        int i = 0;
        int result = 0;

        if (!pr)
                goto end;

        if (!(pr->throttling.state_count > 0)) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        result = acpi_processor_get_throttling(pr);

        if (result) {
                seq_puts(seq,
                         "Could not determine current throttling state.\n");
                goto end;
        }

        seq_printf(seq, "state count: %d\n"
                   "active state: T%d\n"
                   "state available: T%d to T%d\n",
                   pr->throttling.state_count, pr->throttling.state,
                   pr->throttling_platform_limit,
                   pr->throttling.state_count - 1);

        seq_puts(seq, "states:\n");
        if (pr->throttling.acpi_processor_get_throttling ==
            acpi_processor_get_throttling_fadt) {
                for (i = 0; i < pr->throttling.state_count; i++)
                        seq_printf(seq, " %cT%d: %02d%%\n",
                                   (i == pr->throttling.state ? '*' : ' '), i,
                                   (pr->throttling.states[i].performance ? pr->
                                    throttling.states[i].performance / 10 : 0));
        } else {
                for (i = 0; i < pr->throttling.state_count; i++)
                        seq_printf(seq, " %cT%d: %02d%%\n",
                                   (i == pr->throttling.state ? '*' : ' '), i,
                                   (int)pr->throttling.states_tss[i].
                                   freqpercentage);
        }

end:
        return 0;
}

static int acpi_processor_throttling_open_fs(struct inode *inode,
                                             struct file *file)
{
        return single_open(file, acpi_processor_throttling_seq_show,
                           PDE(inode)->data);
}
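
/*
 * /proc write handler: parse a T-state number from userspace and apply
 * it via acpi_processor_set_throttling().
 */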
static ssize_t acpi_processor_write_throttling(struct file *file,
                                               const char __user *buffer,
                                               size_t count, loff_t *data)
{
        int result = 0;
        struct seq_file *m = file->private_data;
        struct acpi_processor *pr = m->private;
        char state_string[12] = { '\0' };

        if (!pr || (count > sizeof(state_string) - 1))
                return -EINVAL;

        if (copy_from_user(state_string, buffer, count))
                return -EFAULT;

        state_string[count] = '\0';

        result = acpi_processor_set_throttling(pr,
                                               simple_strtoul(state_string,
                                                              NULL, 0));
        if (result)
                return result;

        return count;
}

struct file_operations acpi_processor_throttling_fops = {
        .open = acpi_processor_throttling_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_throttling,
        .llseek = seq_lseek,
        .release = single_release,
};