/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
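/*
 * The per-CPU counter shows up under the CPU's sysdev directory, e.g.
 * /sys/devices/system/cpu/cpu0/thermal_throttle/throttle_count
 * (group name and attribute are defined below).
 */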
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL		(300 * HZ)
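/* (300 * HZ jiffies == 300 seconds, i.e. at most one report every five minutes) */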
/*
 * Current thermal throttling state:
 */
struct thermal_state {
	bool			is_throttled;

	u64			next_check;
	unsigned long		throttle_count;
	unsigned long		last_throttle_count;
};
static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en		= ATOMIC_INIT(0);
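/*
 * therm_throt_en is set by intel_init_thermal() once the thermal LVT has
 * been programmed; thermal_throttle_init_device() checks it before setting
 * up the sysfs interface.
 */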
#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name)				\
	static SYSDEV_ATTR(_name, 0444,					\
			   therm_throt_sysdev_show_##_name, NULL)

#define define_therm_throt_sysdev_show_func(name)			\
static ssize_t therm_throt_sysdev_show_##name(				\
			struct sys_device *dev,				\
			struct sysdev_attribute *attr,			\
			char *buf)					\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu))						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_state, cpu).name);	\
	else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
}
define_therm_throt_sysdev_show_func(throttle_count);
define_therm_throt_sysdev_one_ro(throttle_count);
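/*
 * The two invocations above expand to therm_throt_sysdev_show_throttle_count()
 * and to the read-only sysdev attribute attr_throttle_count referenced in the
 * attribute array below.
 */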
static struct attribute *thermal_throttle_attrs[] = {
	&attr_throttle_count.attr,
	NULL
};

static struct attribute_group thermal_throttle_attr_group = {
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
};
#endif /* CONFIG_SYSFS */
/**
 * therm_throt_process - Process thermal throttling event from interrupt
 * @is_throttled: Whether the condition is current or not (boolean), since the
 *                thermal interrupt normally gets called both when the thermal
 *                event begins and once the event has ended.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool is_throttled)
{
	struct thermal_state *state;
	unsigned int this_cpu;
	bool was_throttled;
	u64 now;

	this_cpu = smp_processor_id();
	now = get_jiffies_64();
	state = &per_cpu(thermal_state, this_cpu);

	was_throttled = state->is_throttled;
	state->is_throttled = is_throttled;

	if (is_throttled)
		state->throttle_count++;

	if (time_before64(now, state->next_check) &&
			state->throttle_count != state->last_throttle_count)
		return 0;

	state->next_check = now + CHECK_INTERVAL;
	state->last_throttle_count = state->throttle_count;

	/* if we just entered the thermal event */
	if (is_throttled) {
		printk(KERN_CRIT "CPU%d: Temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, state->throttle_count);

		add_taint(TAINT_MACHINE_CHECK);
		return 1;
	}
	if (was_throttled) {
		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", this_cpu);
		return 1;
	}

	return 0;
}
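/*
 * The return value above is used by intel_thermal_interrupt() to decide
 * whether the raw MSR_IA32_THERM_STATUS value should also be logged to
 * mcelog via mce_log_therm_throt_event().
 */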
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
{
	return sysfs_create_group(&sys_dev->kobj,
				  &thermal_throttle_attr_group);
}

static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
{
	sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
}
/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;
	int err = 0;

	sys_dev = get_cpu_sysdev(cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&therm_cpu_lock);
		err = thermal_throttle_add_dev(sys_dev);
		mutex_unlock(&therm_cpu_lock);
		WARN_ON(err);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mutex_lock(&therm_cpu_lock);
		thermal_throttle_remove_dev(sys_dev);
		mutex_unlock(&therm_cpu_lock);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
	.notifier_call = thermal_throttle_cpu_callback,
};
static __init int thermal_throttle_init_device(void)
{
	unsigned int cpu = 0;
	int err;

	if (!atomic_read(&therm_throt_en))
		return 0;

	register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&therm_cpu_lock);
#endif

	/* connect live CPUs to sysfs */
	for_each_online_cpu(cpu) {
		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
		WARN_ON(err);
	}

#ifdef CONFIG_HOTPLUG_CPU
	mutex_unlock(&therm_cpu_lock);
#endif

	return 0;
}
device_initcall(thermal_throttle_init_device);
#endif /* CONFIG_SYSFS */
/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
	__u64 msr_val;

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	if (therm_throt_process((msr_val & THERM_STATUS_PROCHOT) != 0))
		mce_log_therm_throt_event(msr_val);
}
static void unexpected_thermal_interrupt(void)
{
	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
			smp_processor_id());
	add_taint(TAINT_MACHINE_CHECK);
}
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
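/*
 * smp_thermal_vector points at unexpected_thermal_interrupt() until
 * intel_init_thermal() has programmed the thermal LVT, at which point it
 * is switched to intel_thermal_interrupt().
 */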
asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
	exit_idle();
	irq_enter();
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
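/*
 * intel_init_thermal() below checks whether SMM already owns thermal
 * handling, programs the local APIC thermal LVT, enables TM1/TM2 and the
 * high/low threshold interrupts, and finally unmasks the vector.
 */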
void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	/* Thermal monitoring depends on ACPI and clock modulation */
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return;

	/*
	 * First check if its enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	/* Check whether a vector already exists */
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
		       cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	/* early Pentium M models use different method for enabling TM2 */
	if (cpu_has(c, X86_FEATURE_TM2)) {
		if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
			rdmsr(MSR_THERM2_CTL, l, h);
			if (l & MSR_THERM2_CTL_TM_SELECT)
				tm2 = 1;
		} else if (l & MSR_IA32_MISC_ENABLE_TM2)
			tm2 = 1;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	wrmsr(MSR_IA32_THERM_INTERRUPT,
	      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	smp_thermal_vector = intel_thermal_interrupt;

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	/* Unmask the thermal vector: */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
	       cpu, tm2 ? "TM2" : "TM1");

	/* Enable thermal throttle processing: */
	atomic_set(&therm_throt_en, 1);
}