ACPI: pdc init related memory leak with physical CPU hotplug
[linux-2.6/mini2440.git] / drivers/acpi/processor_core.c
blob d40d45e904a5185d140854c6cfc77162207db3e6
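Note: the commit subject above appears to refer to the _PDC handling in acpi_processor_start() below, where arch_acpi_processor_init_pdc() allocates the _PDC argument buffers, acpi_processor_set_pdc() evaluates _PDC, and arch_acpi_processor_cleanup_pdc() then releases the buffers so that repeated physical CPU hot-add does not leak them.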
1 /*
2 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 * - Added processor hotplug support
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or (at
15 * your option) any later version.
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 * TBD:
28 * 1. Make # power states dynamic.
29 * 2. Support duty_cycle values that span bit 4.
30 * 3. Optimize by having scheduler determine busyness instead of
31 * having us try to calculate it here.
32 * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/types.h>
39 #include <linux/pci.h>
40 #include <linux/pm.h>
41 #include <linux/cpufreq.h>
42 #include <linux/cpu.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/dmi.h>
46 #include <linux/moduleparam.h>
47 #include <linux/cpuidle.h>
49 #include <asm/io.h>
50 #include <asm/system.h>
51 #include <asm/cpu.h>
52 #include <asm/delay.h>
53 #include <asm/uaccess.h>
54 #include <asm/processor.h>
55 #include <asm/smp.h>
56 #include <asm/acpi.h>
58 #include <acpi/acpi_bus.h>
59 #include <acpi/acpi_drivers.h>
60 #include <acpi/processor.h>
62 #define ACPI_PROCESSOR_CLASS "processor"
63 #define ACPI_PROCESSOR_DEVICE_NAME "Processor"
64 #define ACPI_PROCESSOR_FILE_INFO "info"
65 #define ACPI_PROCESSOR_FILE_THROTTLING "throttling"
66 #define ACPI_PROCESSOR_FILE_LIMIT "limit"
67 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
68 #define ACPI_PROCESSOR_NOTIFY_POWER 0x81
69 #define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
71 #define ACPI_PROCESSOR_LIMIT_USER 0
72 #define ACPI_PROCESSOR_LIMIT_THERMAL 1
74 #define _COMPONENT ACPI_PROCESSOR_COMPONENT
75 ACPI_MODULE_NAME("processor_core");
77 MODULE_AUTHOR("Paul Diefenbaugh");
78 MODULE_DESCRIPTION("ACPI Processor Driver");
79 MODULE_LICENSE("GPL");
81 static int acpi_processor_add(struct acpi_device *device);
82 static int acpi_processor_start(struct acpi_device *device);
83 static int acpi_processor_remove(struct acpi_device *device, int type);
84 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
85 static void acpi_processor_notify(struct acpi_device *device, u32 event);
86 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
87 static int acpi_processor_handle_eject(struct acpi_processor *pr);
90 static const struct acpi_device_id processor_device_ids[] = {
91 {ACPI_PROCESSOR_OBJECT_HID, 0},
92 {ACPI_PROCESSOR_HID, 0},
93 {"", 0},
95 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
97 static struct acpi_driver acpi_processor_driver = {
98 .name = "processor",
99 .class = ACPI_PROCESSOR_CLASS,
100 .ids = processor_device_ids,
101 .ops = {
102 .add = acpi_processor_add,
103 .remove = acpi_processor_remove,
104 .start = acpi_processor_start,
105 .suspend = acpi_processor_suspend,
106 .resume = acpi_processor_resume,
107 .notify = acpi_processor_notify,
111 #define INSTALL_NOTIFY_HANDLER 1
112 #define UNINSTALL_NOTIFY_HANDLER 2
114 static const struct file_operations acpi_processor_info_fops = {
115 .owner = THIS_MODULE,
116 .open = acpi_processor_info_open_fs,
117 .read = seq_read,
118 .llseek = seq_lseek,
119 .release = single_release,
122 DEFINE_PER_CPU(struct acpi_processor *, processors);
123 struct acpi_processor_errata errata __read_mostly;
124 static int set_no_mwait(const struct dmi_system_id *id)
126 printk(KERN_NOTICE PREFIX "%s detected - "
127 "disabling mwait for CPU C-states\n", id->ident);
128 idle_nomwait = 1;
129 return 0;
132 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
134 set_no_mwait, "IFL91 board", {
135 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
136 DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
137 DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
138 DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
140 set_no_mwait, "Extensa 5220", {
141 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
142 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
143 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
144 DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
148 /* --------------------------------------------------------------------------
149 Errata Handling
150 -------------------------------------------------------------------------- */
152 static int acpi_processor_errata_piix4(struct pci_dev *dev)
154 u8 value1 = 0;
155 u8 value2 = 0;
158 if (!dev)
159 return -EINVAL;
162 * Note that 'dev' references the PIIX4 ACPI Controller.
165 switch (dev->revision) {
166 case 0:
167 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
168 break;
169 case 1:
170 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
171 break;
172 case 2:
173 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
174 break;
175 case 3:
176 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
177 break;
178 default:
179 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
180 break;
183 switch (dev->revision) {
185 case 0: /* PIIX4 A-step */
186 case 1: /* PIIX4 B-step */
188 * See specification changes #13 ("Manual Throttle Duty Cycle")
189 * and #14 ("Enabling and Disabling Manual Throttle"), plus
190 * erratum #5 ("STPCLK# Deassertion Time") from the January
191 * 2002 PIIX4 specification update. Applies to only older
192 * PIIX4 models.
194 errata.piix4.throttle = 1;
196 case 2: /* PIIX4E */
197 case 3: /* PIIX4M */
199 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
200 * Livelock") from the January 2002 PIIX4 specification update.
201 * Applies to all PIIX4 models.
205 * BM-IDE
206 * ------
207 * Find the PIIX4 IDE Controller and get the Bus Master IDE
208 * Status register address. We'll use this later to read
209 * each IDE controller's DMA status to make sure we catch all
210 * DMA activity.
212 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
213 PCI_DEVICE_ID_INTEL_82371AB,
214 PCI_ANY_ID, PCI_ANY_ID, NULL);
215 if (dev) {
216 errata.piix4.bmisx = pci_resource_start(dev, 4);
217 pci_dev_put(dev);
221 * Type-F DMA
222 * ----------
223 * Find the PIIX4 ISA Controller and read the Motherboard
224 * DMA controller's status to see if Type-F (Fast) DMA mode
225 * is enabled (bit 7) on either channel. Note that we'll
226 * disable C3 support if this is enabled, as some legacy
227 * devices won't operate well if fast DMA is disabled.
229 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
230 PCI_DEVICE_ID_INTEL_82371AB_0,
231 PCI_ANY_ID, PCI_ANY_ID, NULL);
232 if (dev) {
233 pci_read_config_byte(dev, 0x76, &value1);
234 pci_read_config_byte(dev, 0x77, &value2);
235 if ((value1 & 0x80) || (value2 & 0x80))
236 errata.piix4.fdma = 1;
237 pci_dev_put(dev);
240 break;
243 if (errata.piix4.bmisx)
244 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
245 "Bus master activity detection (BM-IDE) erratum enabled\n"));
246 if (errata.piix4.fdma)
247 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
248 "Type-F DMA livelock erratum (C3 disabled)\n"));
250 return 0;
253 static int acpi_processor_errata(struct acpi_processor *pr)
255 int result = 0;
256 struct pci_dev *dev = NULL;
259 if (!pr)
260 return -EINVAL;
263 * PIIX4
265 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
266 PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
267 PCI_ANY_ID, NULL);
268 if (dev) {
269 result = acpi_processor_errata_piix4(dev);
270 pci_dev_put(dev);
273 return result;
276 /* --------------------------------------------------------------------------
277 Common ACPI processor functions
278 -------------------------------------------------------------------------- */
281 * _PDC is required for a BIOS-OS handshake for most of the newer
282 * ACPI processor features.
284 static int acpi_processor_set_pdc(struct acpi_processor *pr)
286 struct acpi_object_list *pdc_in = pr->pdc;
287 acpi_status status = AE_OK;
290 if (!pdc_in)
291 return status;
292 if (idle_nomwait) {
294 * If mwait is disabled for CPU C-states, the C2C3_FFH access
295 * mode will be disabled in the parameter of _PDC object.
296 * Of course C1_FFH access mode will also be disabled.
298 union acpi_object *obj;
299 u32 *buffer = NULL;
301 obj = pdc_in->pointer;
302 buffer = (u32 *)(obj->buffer.pointer);
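/*
 * The _PDC input buffer is typically laid out by
 * arch_acpi_processor_init_pdc() as: buffer[0] = _PDC revision,
 * buffer[1] = number of capability DWORDs, buffer[2] = the
 * capability DWORD masked below.
 */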
303 buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
306 status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
308 if (ACPI_FAILURE(status))
309 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
310 "Could not evaluate _PDC, using legacy perf. control...\n"));
312 return status;
315 /* --------------------------------------------------------------------------
316 FS Interface (/proc)
317 -------------------------------------------------------------------------- */
319 static struct proc_dir_entry *acpi_processor_dir = NULL;
321 static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
323 struct acpi_processor *pr = seq->private;
326 if (!pr)
327 goto end;
329 seq_printf(seq, "processor id: %d\n"
330 "acpi id: %d\n"
331 "bus mastering control: %s\n"
332 "power management: %s\n"
333 "throttling control: %s\n"
334 "limit interface: %s\n",
335 pr->id,
336 pr->acpi_id,
337 pr->flags.bm_control ? "yes" : "no",
338 pr->flags.power ? "yes" : "no",
339 pr->flags.throttling ? "yes" : "no",
340 pr->flags.limit ? "yes" : "no");
342 end:
343 return 0;
346 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
348 return single_open(file, acpi_processor_info_seq_show,
349 PDE(inode)->data);
352 static int acpi_processor_add_fs(struct acpi_device *device)
354 struct proc_dir_entry *entry = NULL;
357 if (!acpi_device_dir(device)) {
358 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
359 acpi_processor_dir);
360 if (!acpi_device_dir(device))
361 return -ENODEV;
364 /* 'info' [R] */
365 entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
366 S_IRUGO, acpi_device_dir(device),
367 &acpi_processor_info_fops,
368 acpi_driver_data(device));
369 if (!entry)
370 return -EIO;
372 /* 'throttling' [R/W] */
373 entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
374 S_IFREG | S_IRUGO | S_IWUSR,
375 acpi_device_dir(device),
376 &acpi_processor_throttling_fops,
377 acpi_driver_data(device));
378 if (!entry)
379 return -EIO;
381 /* 'limit' [R/W] */
382 entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
383 S_IFREG | S_IRUGO | S_IWUSR,
384 acpi_device_dir(device),
385 &acpi_processor_limit_fops,
386 acpi_driver_data(device));
387 if (!entry)
388 return -EIO;
389 return 0;
392 static int acpi_processor_remove_fs(struct acpi_device *device)
395 if (acpi_device_dir(device)) {
396 remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
397 acpi_device_dir(device));
398 remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
399 acpi_device_dir(device));
400 remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
401 acpi_device_dir(device));
402 remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
403 acpi_device_dir(device) = NULL;
406 return 0;
409 /* Use the acpi id in MADT to map cpus in case of SMP */
411 #ifndef CONFIG_SMP
412 static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) { return -1; }
413 #else
415 static struct acpi_table_madt *madt;
417 static int map_lapic_id(struct acpi_subtable_header *entry,
418 u32 acpi_id, int *apic_id)
420 struct acpi_madt_local_apic *lapic =
421 (struct acpi_madt_local_apic *)entry;
422 if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
423 lapic->processor_id == acpi_id) {
424 *apic_id = lapic->id;
425 return 1;
427 return 0;
430 static int map_x2apic_id(struct acpi_subtable_header *entry,
431 int device_declaration, u32 acpi_id, int *apic_id)
433 struct acpi_madt_local_x2apic *apic =
434 (struct acpi_madt_local_x2apic *)entry;
435 u32 tmp = apic->local_apic_id;
437 /* Only check enabled APICs */
438 if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
439 return 0;
441 /* Device statement declaration type */
442 if (device_declaration) {
443 if (apic->uid == acpi_id)
444 goto found;
447 return 0;
448 found:
449 *apic_id = tmp;
450 return 1;
453 static int map_lsapic_id(struct acpi_subtable_header *entry,
454 int device_declaration, u32 acpi_id, int *apic_id)
456 struct acpi_madt_local_sapic *lsapic =
457 (struct acpi_madt_local_sapic *)entry;
458 u32 tmp = (lsapic->id << 8) | lsapic->eid;
460 /* Only check enabled APICs */
461 if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
462 return 0;
464 /* Device statement declaration type */
465 if (device_declaration) {
466 if (entry->length < 16)
467 printk(KERN_ERR PREFIX
468 "Invalid LSAPIC with Device type processor (SAPIC ID %#x)\n",
469 tmp);
470 else if (lsapic->uid == acpi_id)
471 goto found;
472 /* Processor statement declaration type */
473 } else if (lsapic->processor_id == acpi_id)
474 goto found;
476 return 0;
477 found:
478 *apic_id = tmp;
479 return 1;
482 static int map_madt_entry(int type, u32 acpi_id)
484 unsigned long madt_end, entry;
485 int apic_id = -1;
487 if (!madt)
488 return apic_id;
490 entry = (unsigned long)madt;
491 madt_end = entry + madt->header.length;
493 /* Parse all entries looking for a match. */
495 entry += sizeof(struct acpi_table_madt);
496 while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
497 struct acpi_subtable_header *header =
498 (struct acpi_subtable_header *)entry;
499 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
500 if (map_lapic_id(header, acpi_id, &apic_id))
501 break;
502 } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
503 if (map_x2apic_id(header, type, acpi_id, &apic_id))
504 break;
505 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
506 if (map_lsapic_id(header, type, acpi_id, &apic_id))
507 break;
509 entry += header->length;
511 return apic_id;
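/*
 * _MAT evaluates to a buffer holding a single MADT-style subtable that
 * describes this processor's local APIC/SAPIC, so it can be parsed
 * with the same helpers as a static MADT entry.
 */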
514 static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
516 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
517 union acpi_object *obj;
518 struct acpi_subtable_header *header;
519 int apic_id = -1;
521 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
522 goto exit;
524 if (!buffer.length || !buffer.pointer)
525 goto exit;
527 obj = buffer.pointer;
528 if (obj->type != ACPI_TYPE_BUFFER ||
529 obj->buffer.length < sizeof(struct acpi_subtable_header)) {
530 goto exit;
533 header = (struct acpi_subtable_header *)obj->buffer.pointer;
534 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
535 map_lapic_id(header, acpi_id, &apic_id);
536 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
537 map_lsapic_id(header, type, acpi_id, &apic_id);
540 exit:
541 if (buffer.pointer)
542 kfree(buffer.pointer);
543 return apic_id;
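/*
 * Map an ACPI processor id to a logical CPU: prefer the per-device
 * _MAT method, fall back to scanning the static MADT, then match the
 * resulting APIC id against cpu_physical_id() of each possible CPU.
 */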
546 static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id)
548 int i;
549 int apic_id = -1;
551 apic_id = map_mat_entry(handle, type, acpi_id);
552 if (apic_id == -1)
553 apic_id = map_madt_entry(type, acpi_id);
554 if (apic_id == -1)
555 return apic_id;
557 for_each_possible_cpu(i) {
558 if (cpu_physical_id(i) == apic_id)
559 return i;
561 return -1;
563 #endif
565 /* --------------------------------------------------------------------------
566 Driver Interface
567 -------------------------------------------------------------------------- */
569 static int acpi_processor_get_info(struct acpi_device *device)
571 acpi_status status = 0;
572 union acpi_object object = { 0 };
573 struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
574 struct acpi_processor *pr;
575 int cpu_index, device_declaration = 0;
576 static int cpu0_initialized;
578 pr = acpi_driver_data(device);
579 if (!pr)
580 return -EINVAL;
582 if (num_online_cpus() > 1)
583 errata.smp = TRUE;
585 acpi_processor_errata(pr);
588 * Check to see if we have bus mastering arbitration control. This
589 * is required for proper C3 usage (to maintain cache coherency).
591 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
592 pr->flags.bm_control = 1;
593 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
594 "Bus mastering arbitration control present\n"));
595 } else
596 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
597 "No bus mastering arbitration control\n"));
599 if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
601 * Declared with "Device" statement; match _UID.
602 * Note that we don't handle string _UIDs yet.
604 unsigned long long value;
605 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
606 NULL, &value);
607 if (ACPI_FAILURE(status)) {
608 printk(KERN_ERR PREFIX
609 "Evaluating processor _UID [%#x]\n", status);
610 return -ENODEV;
612 device_declaration = 1;
613 pr->acpi_id = value;
614 } else {
615 /* Declared with "Processor" statement; match ProcessorID */
616 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
617 if (ACPI_FAILURE(status)) {
618 printk(KERN_ERR PREFIX "Evaluating processor object\n");
619 return -ENODEV;
623 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
624 * >>> 'acpi_get_processor_id(acpi_id, &id)' in
625 * arch/xxx/acpi.c
627 pr->acpi_id = object.processor.proc_id;
629 cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);
631 /* Handle UP system running SMP kernel, with no LAPIC in MADT */
632 if (!cpu0_initialized && (cpu_index == -1) &&
633 (num_online_cpus() == 1)) {
634 cpu_index = 0;
637 cpu0_initialized = 1;
639 pr->id = cpu_index;
642 * Extra Processor objects may be enumerated on MP systems with
643 * less than the max # of CPUs. They should be ignored _iff
644 * they are physically not present.
646 if (pr->id == -1) {
647 if (ACPI_FAILURE
648 (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
649 return -ENODEV;
653 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
654 pr->acpi_id));
656 if (!object.processor.pblk_address)
657 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
658 else if (object.processor.pblk_length != 6)
659 printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
660 object.processor.pblk_length);
661 else {
662 pr->throttling.address = object.processor.pblk_address;
663 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
664 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
666 pr->pblk = object.processor.pblk_address;
669 * We don't care about error returns - we just try to mark
670 * these reserved so that nobody else is confused into thinking
671 * that this region might be unused..
673 * (In particular, allocating the IO range for Cardbus)
675 request_region(pr->throttling.address, 6, "ACPI CPU throttle");
679 * If ACPI describes a slot number for this CPU, we can use it to
680 * ensure we get the right value in the "physical id" field
681 * of /proc/cpuinfo
683 status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
684 if (ACPI_SUCCESS(status))
685 arch_fix_phys_package_id(pr->id, object.integer.value);
687 return 0;
690 static DEFINE_PER_CPU(void *, processor_device_array);
692 static int __cpuinit acpi_processor_start(struct acpi_device *device)
694 int result = 0;
695 struct acpi_processor *pr;
696 struct sys_device *sysdev;
698 pr = acpi_driver_data(device);
700 result = acpi_processor_get_info(device);
701 if (result) {
702 /* Processor is physically not present */
703 return 0;
706 BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
709 * Buggy BIOS check
710 * ACPI id of processors can be reported wrongly by the BIOS.
711 * Don't trust it blindly
713 if (per_cpu(processor_device_array, pr->id) != NULL &&
714 per_cpu(processor_device_array, pr->id) != device) {
715 printk(KERN_WARNING "BIOS reported wrong ACPI id "
716 "for the processor\n");
717 return -ENODEV;
719 per_cpu(processor_device_array, pr->id) = device;
721 per_cpu(processors, pr->id) = pr;
723 result = acpi_processor_add_fs(device);
724 if (result)
725 goto end;
727 sysdev = get_cpu_sysdev(pr->id);
728 if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
729 return -EFAULT;
731 /* _PDC call should be done before doing anything else (if reqd.). */
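/*
 * arch_acpi_processor_init_pdc() allocates the _PDC argument objects;
 * once _PDC has been evaluated they are no longer needed and
 * arch_acpi_processor_cleanup_pdc() releases them again. Per the patch
 * subject, this is presumably the allocation that used to leak across
 * physical CPU hot-add.
 */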
732 arch_acpi_processor_init_pdc(pr);
733 acpi_processor_set_pdc(pr);
734 arch_acpi_processor_cleanup_pdc(pr);
736 #ifdef CONFIG_CPU_FREQ
737 acpi_processor_ppc_has_changed(pr);
738 #endif
739 acpi_processor_get_throttling_info(pr);
740 acpi_processor_get_limit_info(pr);
743 acpi_processor_power_init(pr, device);
745 pr->cdev = thermal_cooling_device_register("Processor", device,
746 &processor_cooling_ops);
747 if (IS_ERR(pr->cdev)) {
748 result = PTR_ERR(pr->cdev);
749 goto end;
752 dev_info(&device->dev, "registered as cooling_device%d\n",
753 pr->cdev->id);
755 result = sysfs_create_link(&device->dev.kobj,
756 &pr->cdev->device.kobj,
757 "thermal_cooling");
758 if (result)
759 printk(KERN_ERR PREFIX "Create sysfs link\n");
760 result = sysfs_create_link(&pr->cdev->device.kobj,
761 &device->dev.kobj,
762 "device");
763 if (result)
764 printk(KERN_ERR PREFIX "Create sysfs link\n");
766 if (pr->flags.throttling) {
767 printk(KERN_INFO PREFIX "%s [%s] (supports",
768 acpi_device_name(device), acpi_device_bid(device));
769 printk(" %d throttling states", pr->throttling.state_count);
770 printk(")\n");
773 end:
775 return result;
778 static void acpi_processor_notify(struct acpi_device *device, u32 event)
780 struct acpi_processor *pr = acpi_driver_data(device);
781 int saved;
783 if (!pr)
784 return;
786 switch (event) {
787 case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
788 saved = pr->performance_platform_limit;
789 acpi_processor_ppc_has_changed(pr);
790 if (saved == pr->performance_platform_limit)
791 break;
792 acpi_bus_generate_proc_event(device, event,
793 pr->performance_platform_limit);
794 acpi_bus_generate_netlink_event(device->pnp.device_class,
795 dev_name(&device->dev), event,
796 pr->performance_platform_limit);
797 break;
798 case ACPI_PROCESSOR_NOTIFY_POWER:
799 acpi_processor_cst_has_changed(pr);
800 acpi_bus_generate_proc_event(device, event, 0);
801 acpi_bus_generate_netlink_event(device->pnp.device_class,
802 dev_name(&device->dev), event, 0);
803 break;
804 case ACPI_PROCESSOR_NOTIFY_THROTTLING:
805 acpi_processor_tstate_has_changed(pr);
806 acpi_bus_generate_proc_event(device, event, 0);
807 acpi_bus_generate_netlink_event(device->pnp.device_class,
808 dev_name(&device->dev), event, 0);
break;
809 default:
810 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
811 "Unsupported event [0x%x]\n", event));
812 break;
815 return;
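/*
 * CPU hotplug (soft online) callback: when a CPU comes back online,
 * re-evaluate the _PPC, _CST and throttling state that may have
 * changed while it was offline.
 */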
818 static int acpi_cpu_soft_notify(struct notifier_block *nfb,
819 unsigned long action, void *hcpu)
821 unsigned int cpu = (unsigned long)hcpu;
822 struct acpi_processor *pr = per_cpu(processors, cpu);
824 if (action == CPU_ONLINE && pr) {
825 acpi_processor_ppc_has_changed(pr);
826 acpi_processor_cst_has_changed(pr);
827 acpi_processor_tstate_has_changed(pr);
829 return NOTIFY_OK;
832 static struct notifier_block acpi_cpu_notifier =
834 .notifier_call = acpi_cpu_soft_notify,
837 static int acpi_processor_add(struct acpi_device *device)
839 struct acpi_processor *pr = NULL;
842 if (!device)
843 return -EINVAL;
845 pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
846 if (!pr)
847 return -ENOMEM;
849 if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
850 kfree(pr);
851 return -ENOMEM;
854 pr->handle = device->handle;
855 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
856 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
857 device->driver_data = pr;
859 return 0;
862 static int acpi_processor_remove(struct acpi_device *device, int type)
864 struct acpi_processor *pr = NULL;
867 if (!device || !acpi_driver_data(device))
868 return -EINVAL;
870 pr = acpi_driver_data(device);
872 if (pr->id >= nr_cpu_ids)
873 goto free;
875 if (type == ACPI_BUS_REMOVAL_EJECT) {
876 if (acpi_processor_handle_eject(pr))
877 return -EINVAL;
880 acpi_processor_power_exit(pr, device);
882 sysfs_remove_link(&device->dev.kobj, "sysdev");
884 acpi_processor_remove_fs(device);
886 if (pr->cdev) {
887 sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
888 sysfs_remove_link(&pr->cdev->device.kobj, "device");
889 thermal_cooling_device_unregister(pr->cdev);
890 pr->cdev = NULL;
893 per_cpu(processors, pr->id) = NULL;
894 per_cpu(processor_device_array, pr->id) = NULL;
896 free:
897 free_cpumask_var(pr->throttling.shared_cpu_map);
898 kfree(pr);
900 return 0;
903 #ifdef CONFIG_ACPI_HOTPLUG_CPU
904 /****************************************************************************
905 * Acpi processor hotplug support *
906 ****************************************************************************/
908 static int is_processor_present(acpi_handle handle)
910 acpi_status status;
911 unsigned long long sta = 0;
914 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
916 if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
917 return 1;
920 * _STA is mandatory for a processor that supports hot plug
922 if (status == AE_NOT_FOUND)
923 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
924 "Processor does not support hot plug\n"));
925 else
926 ACPI_EXCEPTION((AE_INFO, status,
927 "Processor Device is not present"));
928 return 0;
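/*
 * Called from the hotplug notifier when no struct acpi_device exists
 * yet for this handle: enumerate it under its parent and start it.
 */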
931 static
932 int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
934 acpi_handle phandle;
935 struct acpi_device *pdev;
936 struct acpi_processor *pr;
939 if (acpi_get_parent(handle, &phandle)) {
940 return -ENODEV;
943 if (acpi_bus_get_device(phandle, &pdev)) {
944 return -ENODEV;
947 if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
948 return -ENODEV;
951 acpi_bus_start(*device);
953 pr = acpi_driver_data(*device);
954 if (!pr)
955 return -ENODEV;
957 if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
958 kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
960 return 0;
963 static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
964 u32 event, void *data)
966 struct acpi_processor *pr;
967 struct acpi_device *device = NULL;
968 int result;
971 switch (event) {
972 case ACPI_NOTIFY_BUS_CHECK:
973 case ACPI_NOTIFY_DEVICE_CHECK:
974 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
975 "Processor driver received %s event\n",
976 (event == ACPI_NOTIFY_BUS_CHECK) ?
977 "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
979 if (!is_processor_present(handle))
980 break;
982 if (acpi_bus_get_device(handle, &device)) {
983 result = acpi_processor_device_add(handle, &device);
984 if (result)
985 printk(KERN_ERR PREFIX
986 "Unable to add the device\n");
987 break;
990 pr = acpi_driver_data(device);
991 if (!pr) {
992 printk(KERN_ERR PREFIX "Driver data is NULL\n");
993 break;
996 if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
997 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
998 break;
1001 result = acpi_processor_start(device);
1002 if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
1003 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
1004 } else {
1005 printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
1006 acpi_device_bid(device));
1008 break;
1009 case ACPI_NOTIFY_EJECT_REQUEST:
1010 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1011 "received ACPI_NOTIFY_EJECT_REQUEST\n"));
1013 if (acpi_bus_get_device(handle, &device)) {
1014 printk(KERN_ERR PREFIX
1015 "Device doesn't exist, dropping EJECT\n");
1016 break;
1018 pr = acpi_driver_data(device);
1019 if (!pr) {
1020 printk(KERN_ERR PREFIX
1021 "Driver data is NULL, dropping EJECT\n");
1022 return;
1025 if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
1026 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
1027 break;
1028 default:
1029 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1030 "Unsupported event [0x%x]\n", event));
1031 break;
1034 return;
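/*
 * Namespace walk callback: install or remove the hotplug notify
 * handler on every Processor object, depending on the action passed
 * in via context.
 */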
1037 static acpi_status
1038 processor_walk_namespace_cb(acpi_handle handle,
1039 u32 lvl, void *context, void **rv)
1041 acpi_status status;
1042 int *action = context;
1043 acpi_object_type type = 0;
1045 status = acpi_get_type(handle, &type);
1046 if (ACPI_FAILURE(status))
1047 return (AE_OK);
1049 if (type != ACPI_TYPE_PROCESSOR)
1050 return (AE_OK);
1052 switch (*action) {
1053 case INSTALL_NOTIFY_HANDLER:
1054 acpi_install_notify_handler(handle,
1055 ACPI_SYSTEM_NOTIFY,
1056 acpi_processor_hotplug_notify,
1057 NULL);
1058 break;
1059 case UNINSTALL_NOTIFY_HANDLER:
1060 acpi_remove_notify_handler(handle,
1061 ACPI_SYSTEM_NOTIFY,
1062 acpi_processor_hotplug_notify);
1063 break;
1064 default:
1065 break;
1068 return (AE_OK);
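/*
 * Physical hot-add path: map the new processor's local SAPIC/APIC to a
 * logical CPU id and register it with the architecture code, undoing
 * the mapping if registration fails.
 */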
1071 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
1074 if (!is_processor_present(handle)) {
1075 return AE_ERROR;
1078 if (acpi_map_lsapic(handle, p_cpu))
1079 return AE_ERROR;
1081 if (arch_register_cpu(*p_cpu)) {
1082 acpi_unmap_lsapic(*p_cpu);
1083 return AE_ERROR;
1086 return AE_OK;
1089 static int acpi_processor_handle_eject(struct acpi_processor *pr)
1091 if (cpu_online(pr->id))
1092 cpu_down(pr->id);
1094 arch_unregister_cpu(pr->id);
1095 acpi_unmap_lsapic(pr->id);
1096 return (0);
1098 #else
1099 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
1101 return AE_ERROR;
1103 static int acpi_processor_handle_eject(struct acpi_processor *pr)
1105 return (-EINVAL);
1107 #endif
1109 static
1110 void acpi_processor_install_hotplug_notify(void)
1112 #ifdef CONFIG_ACPI_HOTPLUG_CPU
1113 int action = INSTALL_NOTIFY_HANDLER;
1114 acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
1115 ACPI_ROOT_OBJECT,
1116 ACPI_UINT32_MAX,
1117 processor_walk_namespace_cb, &action, NULL);
1118 #endif
1119 register_hotcpu_notifier(&acpi_cpu_notifier);
1122 static
1123 void acpi_processor_uninstall_hotplug_notify(void)
1125 #ifdef CONFIG_ACPI_HOTPLUG_CPU
1126 int action = UNINSTALL_NOTIFY_HANDLER;
1127 acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
1128 ACPI_ROOT_OBJECT,
1129 ACPI_UINT32_MAX,
1130 processor_walk_namespace_cb, &action, NULL);
1131 #endif
1132 unregister_hotcpu_notifier(&acpi_cpu_notifier);
1136 * We keep the driver loaded even when ACPI is not running.
1137 * This is needed for the powernow-k8 driver, that works even without
1138 * ACPI, but needs symbols from this driver
1141 static int __init acpi_processor_init(void)
1143 int result = 0;
1145 memset(&errata, 0, sizeof(errata));
1147 #ifdef CONFIG_SMP
1148 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
1149 (struct acpi_table_header **)&madt)))
1150 madt = NULL;
1151 #endif
1153 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1154 if (!acpi_processor_dir)
1155 return -ENOMEM;
1158 * Check whether the system matches the DMI table. If yes, OSPM
1159 * should not use mwait for CPU C-states.
1161 dmi_check_system(processor_idle_dmi_table);
1162 result = cpuidle_register_driver(&acpi_idle_driver);
1163 if (result < 0)
1164 goto out_proc;
1166 result = acpi_bus_register_driver(&acpi_processor_driver);
1167 if (result < 0)
1168 goto out_cpuidle;
1170 acpi_processor_install_hotplug_notify();
1172 acpi_thermal_cpufreq_init();
1174 acpi_processor_ppc_init();
1176 acpi_processor_throttling_init();
1178 return 0;
1180 out_cpuidle:
1181 cpuidle_unregister_driver(&acpi_idle_driver);
1183 out_proc:
1184 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1186 return result;
1189 static void __exit acpi_processor_exit(void)
1191 acpi_processor_ppc_exit();
1193 acpi_thermal_cpufreq_exit();
1195 acpi_processor_uninstall_hotplug_notify();
1197 acpi_bus_unregister_driver(&acpi_processor_driver);
1199 cpuidle_unregister_driver(&acpi_idle_driver);
1201 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1203 return;
1206 module_init(acpi_processor_init);
1207 module_exit(acpi_processor_exit);
1209 EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
1211 MODULE_ALIAS("processor");