/*
 * coretemp.c - Linux kernel module for hardware monitoring
 *
 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
 *
 * Inspired from many hwmon drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/smp.h>

#define DRVNAME "coretemp"
typedef enum { SHOW_TEMP, SHOW_TJMAX, SHOW_TTARGET, SHOW_LABEL,
		SHOW_NAME } SHOW;

/*
 * Functions declaration
 */

static struct coretemp_data *coretemp_update_device(struct device *dev);
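
/*
 * Per-device (one per physical core) state.  The measurement fields
 * (temp, alarm, valid, last_updated) are protected by update_lock and
 * refreshed by coretemp_update_device(); the remaining fields are set
 * once at probe time.
 */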
struct coretemp_data {
	struct device *hwmon_dev;
	struct mutex update_lock;
	const char *name;
	u32 id;
	u16 core_id;
	char valid;			/* zero until following fields are valid */
	unsigned long last_updated;	/* in jiffies */
	int temp;
	int tjmax;
	int ttarget;
	u8 alarm;
};

/*
 * Sysfs stuff
 */
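
/*
 * show_name() backs both the "name" and "temp1_label" attributes: the
 * former reports the driver name, the latter the core this sensor
 * belongs to.
 */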
static ssize_t show_name(struct device *dev, struct device_attribute
			  *devattr, char *buf)
{
	int ret;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct coretemp_data *data = dev_get_drvdata(dev);

	if (attr->index == SHOW_NAME)
		ret = sprintf(buf, "%s\n", data->name);
	else	/* show label */
		ret = sprintf(buf, "Core %d\n", data->core_id);
	return ret;
}

static ssize_t show_alarm(struct device *dev, struct device_attribute
			  *devattr, char *buf)
{
	struct coretemp_data *data = coretemp_update_device(dev);
	/* read the Out-of-spec log, never clear */
	return sprintf(buf, "%d\n", data->alarm);
}
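
/*
 * show_temp() reports temp1_input, temp1_crit (TjMax) and temp1_max
 * (TTarget) in millidegrees Celsius; temp1_input returns -EAGAIN until
 * a first valid reading has been cached.
 */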
static ssize_t show_temp(struct device *dev,
			 struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct coretemp_data *data = coretemp_update_device(dev);
	int err;

	if (attr->index == SHOW_TEMP)
		err = data->valid ? sprintf(buf, "%d\n", data->temp) : -EAGAIN;
	else if (attr->index == SHOW_TJMAX)
		err = sprintf(buf, "%d\n", data->tjmax);
	else
		err = sprintf(buf, "%d\n", data->ttarget);
	return err;
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
			  SHOW_TEMP);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL,
			  SHOW_TJMAX);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL,
			  SHOW_TTARGET);
static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);

static struct attribute *coretemp_attributes[] = {
	&sensor_dev_attr_name.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&dev_attr_temp1_crit_alarm.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	NULL
};

static const struct attribute_group coretemp_group = {
	.attrs = coretemp_attributes,
};
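
/*
 * coretemp_update_device() re-reads IA32_THERM_STATUS at most once per
 * second: bit 5 is the sticky out-of-spec log, bit 31 flags a valid
 * readout, and bits 22:16 carry the offset below TjMax in degrees C.
 */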
static struct coretemp_data *coretemp_update_device(struct device *dev)
{
	struct coretemp_data *data = dev_get_drvdata(dev);

	mutex_lock(&data->update_lock);

	if (!data->valid || time_after(jiffies, data->last_updated + HZ)) {
		u32 eax, edx;

		data->valid = 0;
		rdmsr_on_cpu(data->id, MSR_IA32_THERM_STATUS, &eax, &edx);
		data->alarm = (eax >> 5) & 1;
		/* update only if the reading is flagged as valid */
		if (eax & 0x80000000) {
			data->temp = data->tjmax - (((eax >> 16)
							& 0x7f) * 1000);
			data->valid = 1;
		} else {
			dev_dbg(dev, "Temperature data invalid (0x%x)\n", eax);
		}
		data->last_updated = jiffies;
	}

	mutex_unlock(&data->update_lock);
	return data;
}
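
/*
 * adjust_tjmax() estimates TjMax for CPUs that do not report it in
 * IA32_TEMPERATURE_TARGET: Atoms are classified by their NM10 host
 * bridge PCI ID, mobile Core2 parts by MSR 0x17 bit 28 together with
 * the MSR 0xEE "EE" bit.
 */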
static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	/* 100 C is the default for both mobile and non-mobile CPUs */

	int tjmax = 100000;
	int tjmax_ee = 85000;
	int usemsr_ee = 1;
	int err;
	u32 eax, edx;
	struct pci_dev *host_bridge;

	/* Early chips have no MSR for TjMax */

	if ((c->x86_model == 0xf) && (c->x86_mask < 4)) {
		usemsr_ee = 0;
	}

	/* Atom CPUs */

	if (c->x86_model == 0x1c) {
		usemsr_ee = 0;

		host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));

		if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
		    && (host_bridge->device == 0xa000	/* NM10 based nettop */
		    || host_bridge->device == 0xa010))	/* NM10 based netbook */
			tjmax = 100000;
		else
			tjmax = 90000;

		pci_dev_put(host_bridge);
	}

	if ((c->x86_model > 0xe) && (usemsr_ee)) {
		u8 platform_id;

		/*
		 * Now we can detect the mobile CPU using the Intel provided
		 * table: http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
		 * For Core2 cores, check MSR 0x17, bit 28: 1 = Mobile CPU
		 */
		err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0x17, assuming desktop"
				 " CPU\n");
			usemsr_ee = 0;
		} else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
			/*
			 * Trust bit 28 up to Penryn; no documentation on it
			 * could be found, so this remains a best guess.
			 */
			usemsr_ee = 0;
		} else {
			/* Platform ID bits 52:50 (EDX starts at bit 32) */
			platform_id = (edx >> 18) & 0x7;

			/*
			 * Mobile Penryn CPUs seem to be platform ID 7 or 5
			 * (guesswork).
			 */
			if ((c->x86_model == 0x17) &&
			    ((platform_id == 5) || (platform_id == 7))) {
				/*
				 * If the MSR EE bit is set, use 90 degrees C,
				 * otherwise 105 degrees C.
				 */
				tjmax_ee = 90000;
				tjmax = 105000;
			}
		}
	}

	if (usemsr_ee) {
		err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0xEE, Tjmax left"
				 " at default\n");
		} else if (eax & 0x40000000) {
			tjmax = tjmax_ee;
		}
	/*
	 * If we don't use MSR EE, this is a desktop CPU (with the
	 * exception of Atom).
	 */
	} else if (tjmax == 100000) {
		dev_warn(dev, "Using relative temperature scale!\n");
	}

	return tjmax;
}
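
/*
 * get_tjmax() prefers the TjMax reported in bits 23:16 of
 * IA32_TEMPERATURE_TARGET, accepting it only within a plausible
 * 80-120 C window; otherwise a per-model default is assumed.
 */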
static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
			       struct device *dev)
{
	int err;
	u32 eax, edx;
	u32 val;

	/*
	 * On current Intel(R) processors, the IA32_TEMPERATURE_TARGET
	 * MSR contains the TjMax value.
	 */
	err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (err) {
		dev_warn(dev, "Unable to read TjMax from CPU.\n");
	} else {
		val = (eax >> 16) & 0xff;
		/*
		 * If the TjMax is not plausible, an assumption
		 * will be used instead.
		 */
		if ((val > 80) && (val < 120)) {
			dev_info(dev, "TjMax is %d C.\n", val);
			return val * 1000;
		}
	}

	/*
	 * An assumption is made for early CPUs and unreadable MSRs.
	 * NOTE: the given value may not be correct.
	 */
	switch (c->x86_model) {
	case 0xe:
	case 0xf:
	case 0x16:
	case 0x1a:
		dev_warn(dev, "TjMax is assumed to be 100 C!\n");
		return 100000;
	case 0x17:
	case 0x1c:		/* Atom CPUs */
		return adjust_tjmax(c, id, dev);
	default:
		dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
			 " using default TjMax of 100 C.\n", c->x86_model);
		return 100000;
	}
}
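
/*
 * Runs on the target CPU via smp_call_function_single(): writing 0 to
 * MSR_IA32_UCODE_REV and executing a serializing instruction makes the
 * CPU report its microcode revision in EDX on the following read.
 */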
static void __devinit get_ucode_rev_on_cpu(void *edx)
{
	u32 eax;

	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
	sync_core();
	rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
}
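
/*
 * coretemp_probe() is called once per registered platform device (one
 * per physical core): it verifies that IA32_THERM_STATUS is readable,
 * rejects CPUs hit by errata AE18 without a microcode fix, determines
 * TjMax and (where available) TTarget, and finally registers the sysfs
 * attributes and the hwmon class device.
 */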
static int __devinit coretemp_probe(struct platform_device *pdev)
{
	struct coretemp_data *data;
	struct cpuinfo_x86 *c = &cpu_data(pdev->id);
	int err;
	u32 eax, edx;

	data = kzalloc(sizeof(struct coretemp_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Out of memory\n");
		goto exit;
	}

	data->id = pdev->id;
#ifdef CONFIG_SMP
	data->core_id = c->cpu_core_id;
#endif
	data->name = "coretemp";
	mutex_init(&data->update_lock);

	/* test if we can access the THERM_STATUS MSR */
	err = rdmsr_safe_on_cpu(data->id, MSR_IA32_THERM_STATUS, &eax, &edx);
	if (err) {
		dev_err(&pdev->dev,
			"Unable to access THERM_STATUS MSR, giving up\n");
		goto exit_free;
	}

	/*
	 * Check for errata AE18 of Core processors: readings might stop
	 * updating when the processor visited too deep a sleep state;
	 * fixed for stepping D0 (6EC).
	 */
	if ((c->x86_model == 0xe) && (c->x86_mask < 0xc)) {
		/* check for microcode update */
		err = smp_call_function_single(data->id, get_ucode_rev_on_cpu,
					       &edx, 1);
		if (err) {
			dev_err(&pdev->dev,
				"Cannot determine microcode revision of "
				"CPU#%u (%d)!\n", data->id, err);
			err = -ENODEV;
			goto exit_free;
		} else if (edx < 0x39) {
			err = -ENODEV;
			dev_err(&pdev->dev,
				"Errata AE18 not fixed, update BIOS or "
				"microcode of the CPU!\n");
			goto exit_free;
		}
	}

	data->tjmax = get_tjmax(c, data->id, &pdev->dev);
	platform_set_drvdata(pdev, data);

	/*
	 * Read the still undocumented IA32_TEMPERATURE_TARGET. It exists
	 * on older CPUs but not in this register; Atoms don't have it
	 * either.
	 */
	if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) {
		err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET,
					&eax, &edx);
		if (err) {
			dev_warn(&pdev->dev, "Unable to read"
				 " IA32_TEMPERATURE_TARGET MSR\n");
		} else {
			data->ttarget = data->tjmax -
					(((eax >> 8) & 0xff) * 1000);
			err = device_create_file(&pdev->dev,
					&sensor_dev_attr_temp1_max.dev_attr);
			if (err)
				goto exit_free;
		}
	}

	err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group);
	if (err)
		goto exit_dev;

	data->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		dev_err(&pdev->dev, "Class registration failed (%d)\n",
			err);
		goto exit_class;
	}

	return 0;

exit_class:
	sysfs_remove_group(&pdev->dev.kobj, &coretemp_group);
exit_dev:
	device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
exit_free:
	kfree(data);
exit:
	return err;
}

static int __devexit coretemp_remove(struct platform_device *pdev)
{
	struct coretemp_data *data = platform_get_drvdata(pdev);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&pdev->dev.kobj, &coretemp_group);
	device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
	platform_set_drvdata(pdev, NULL);
	kfree(data);
	return 0;
}

static struct platform_driver coretemp_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = DRVNAME,
	},
	.probe = coretemp_probe,
	.remove = __devexit_p(coretemp_remove),
};

struct pdev_entry {
	struct list_head list;
	struct platform_device *pdev;
	unsigned int cpu;
#ifdef CONFIG_SMP
	u16 phys_proc_id;
	u16 cpu_core_id;
#endif
};

static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);
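
/*
 * coretemp_device_add() creates one platform device per physical core:
 * CPUs without digital thermal sensors (X86_FEATURE_DTS) are skipped,
 * as is the second hyper-threading sibling of a core that is already
 * on pdev_list.
 */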
static int __cpuinit coretemp_device_add(unsigned int cpu)
{
	int err;
	struct platform_device *pdev;
	struct pdev_entry *pdev_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
	 * sensors. Checking this bit is enough to filter out all the
	 * early CPUs without thermal sensors.
	 */
	if (!cpu_has(c, X86_FEATURE_DTS)) {
		pr_info("CPU (model=0x%x) has no thermal sensor\n",
			c->x86_model);
		return 0;
	}

	mutex_lock(&pdev_list_mutex);

#ifdef CONFIG_SMP
	/* Skip second HT entry of each core */
	list_for_each_entry(pdev_entry, &pdev_list, list) {
		if (c->phys_proc_id == pdev_entry->phys_proc_id &&
		    c->cpu_core_id == pdev_entry->cpu_core_id) {
			err = 0;	/* Not an error */
			goto exit;
		}
	}
#endif

	pdev = platform_device_alloc(DRVNAME, cpu);
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed\n");
		goto exit;
	}

	pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
	if (!pdev_entry) {
		err = -ENOMEM;
		goto exit_device_put;
	}

	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto exit_device_free;
	}

	pdev_entry->pdev = pdev;
	pdev_entry->cpu = cpu;
#ifdef CONFIG_SMP
	pdev_entry->phys_proc_id = c->phys_proc_id;
	pdev_entry->cpu_core_id = c->cpu_core_id;
#endif
	list_add_tail(&pdev_entry->list, &pdev_list);
	mutex_unlock(&pdev_list_mutex);

	return 0;

exit_device_free:
	kfree(pdev_entry);
exit_device_put:
	platform_device_put(pdev);
exit:
	mutex_unlock(&pdev_list_mutex);
	return err;
}
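
/*
 * coretemp_device_remove() tears down the device of a CPU that goes
 * offline and, if possible, re-adds a device on one of its siblings so
 * the core keeps reporting a temperature.
 */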
static void __cpuinit coretemp_device_remove(unsigned int cpu)
{
	struct pdev_entry *p;
	unsigned int i;

	mutex_lock(&pdev_list_mutex);
	list_for_each_entry(p, &pdev_list, list) {
		if (p->cpu != cpu)
			continue;

		platform_device_unregister(p->pdev);
		list_del(&p->list);
		mutex_unlock(&pdev_list_mutex);
		kfree(p);
		for_each_cpu(i, cpu_sibling_mask(cpu))
			if (i != cpu && !coretemp_device_add(i))
				break;
		return;
	}
	mutex_unlock(&pdev_list_mutex);
}
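
/*
 * CPU hotplug notifier: add a device when a CPU comes online (or a
 * planned offline fails), remove it when the CPU is about to go down.
 */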
static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
					   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		coretemp_device_add(cpu);
		break;
	case CPU_DOWN_PREPARE:
		coretemp_device_remove(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block coretemp_cpu_notifier __refdata = {
	.notifier_call = coretemp_cpu_callback,
};
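
/*
 * Module init: bail out on non-Intel systems, register the platform
 * driver, create a device for every online CPU and hook up the CPU
 * hotplug notifier.  Without CONFIG_HOTPLUG_CPU an empty device list
 * means there is nothing to monitor, so the module refuses to load.
 */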
static int __init coretemp_init(void)
{
	int i, err = -ENODEV;

	/* quick check if we run on an Intel CPU */
	if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
		goto exit;

	err = platform_driver_register(&coretemp_driver);
	if (err)
		goto exit;

	for_each_online_cpu(i)
		coretemp_device_add(i);

#ifndef CONFIG_HOTPLUG_CPU
	if (list_empty(&pdev_list)) {
		err = -ENODEV;
		goto exit_driver_unreg;
	}
#endif

	register_hotcpu_notifier(&coretemp_cpu_notifier);
	return 0;

#ifndef CONFIG_HOTPLUG_CPU
exit_driver_unreg:
	platform_driver_unregister(&coretemp_driver);
#endif
exit:
	return err;
}

static void __exit coretemp_exit(void)
{
	struct pdev_entry *p, *n;

	unregister_hotcpu_notifier(&coretemp_cpu_notifier);
	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(p, n, &pdev_list, list) {
		platform_device_unregister(p->pdev);
		list_del(&p->list);
		kfree(p);
	}
	mutex_unlock(&pdev_list_mutex);
	platform_driver_unregister(&coretemp_driver);
}

MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");

module_init(coretemp_init)
module_exit(coretemp_exit)