/* drivers/acpi/processor_core.c */
/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * TBD:
 *      1. Make # power states dynamic.
 *      2. Support duty_cycle values that span bit 4.
 *      3. Optimize by having scheduler determine busyness instead of
 *         having us try to calculate it here.
 *      4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DEVICE_NAME      "Processor"
#define ACPI_PROCESSOR_FILE_INFO        "info"
#define ACPI_PROCESSOR_FILE_THROTTLING  "throttling"
#define ACPI_PROCESSOR_FILE_LIMIT       "limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER     0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82

#define ACPI_PROCESSOR_LIMIT_USER       0
#define ACPI_PROCESSOR_LIMIT_THERMAL    1

#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
extern int acpi_processor_tstate_has_changed(struct acpi_processor *pr);

static const struct acpi_device_id processor_device_ids[] = {
        {ACPI_PROCESSOR_HID, 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
        .name = "processor",
        .class = ACPI_PROCESSOR_CLASS,
        .ids = processor_device_ids,
        .ops = {
                .add = acpi_processor_add,
                .remove = acpi_processor_remove,
                .start = acpi_processor_start,
                .suspend = acpi_processor_suspend,
                .resume = acpi_processor_resume,
                },
};

#define INSTALL_NOTIFY_HANDLER          1
#define UNINSTALL_NOTIFY_HANDLER        2

static const struct file_operations acpi_processor_info_fops = {
        .open = acpi_processor_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

struct acpi_processor *processors[NR_CPUS];
struct acpi_processor_errata errata __read_mostly;

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
        u8 value1 = 0;
        u8 value2 = 0;

        if (!dev)
                return -EINVAL;

        /*
         * Note that 'dev' references the PIIX4 ACPI Controller.
         */

        switch (dev->revision) {
        case 0:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
                break;
        case 1:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
                break;
        case 2:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
                break;
        case 3:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
                break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
                break;
        }

        switch (dev->revision) {

        case 0:         /* PIIX4 A-step */
        case 1:         /* PIIX4 B-step */
                /*
                 * See specification changes #13 ("Manual Throttle Duty Cycle")
                 * and #14 ("Enabling and Disabling Manual Throttle"), plus
                 * erratum #5 ("STPCLK# Deassertion Time") from the January
                 * 2002 PIIX4 specification update.  Applies to only older
                 * PIIX4 models.
                 */
                errata.piix4.throttle = 1;
                /* Fall through: the BM-IDE and Type-F DMA errata below
                 * apply to all PIIX4 models, including the A- and B-steps. */

        case 2:         /* PIIX4E */
        case 3:         /* PIIX4M */
                /*
                 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
                 * Livelock") from the January 2002 PIIX4 specification update.
                 * Applies to all PIIX4 models.
                 */

                /*
                 * BM-IDE
                 * ------
                 * Find the PIIX4 IDE Controller and get the Bus Master IDE
                 * Status register address.  We'll use this later to read
                 * each IDE controller's DMA status to make sure we catch all
                 * DMA activity.
                 */
                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                                     PCI_DEVICE_ID_INTEL_82371AB,
                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
                if (dev) {
                        errata.piix4.bmisx = pci_resource_start(dev, 4);
                        pci_dev_put(dev);
                }

                /*
                 * Type-F DMA
                 * ----------
                 * Find the PIIX4 ISA Controller and read the Motherboard
                 * DMA controller's status to see if Type-F (Fast) DMA mode
                 * is enabled (bit 7) on either channel.  Note that we'll
                 * disable C3 support if this is enabled, as some legacy
                 * devices won't operate well if fast DMA is disabled.
                 */
                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                                     PCI_DEVICE_ID_INTEL_82371AB_0,
                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
                if (dev) {
                        pci_read_config_byte(dev, 0x76, &value1);
                        pci_read_config_byte(dev, 0x77, &value2);
                        if ((value1 & 0x80) || (value2 & 0x80))
                                errata.piix4.fdma = 1;
                        pci_dev_put(dev);
                }

                break;
        }

        if (errata.piix4.bmisx)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Bus master activity detection (BM-IDE) erratum enabled\n"));
        if (errata.piix4.fdma)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Type-F DMA livelock erratum (C3 disabled)\n"));

        return 0;
}

static int acpi_processor_errata(struct acpi_processor *pr)
{
        int result = 0;
        struct pci_dev *dev = NULL;

        if (!pr)
                return -EINVAL;

        /*
         * PIIX4
         */
        dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                             PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
                             PCI_ANY_ID, NULL);
        if (dev) {
                result = acpi_processor_errata_piix4(dev);
                pci_dev_put(dev);
        }

        return result;
}

/* --------------------------------------------------------------------------
                        Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
        struct acpi_object_list *pdc_in = pr->pdc;
        acpi_status status = AE_OK;

        if (!pdc_in)
                return status;

        status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

        if (ACPI_FAILURE(status))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "Could not evaluate _PDC, using legacy perf. control...\n"));

        return status;
}
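
/*
 * Note: the _PDC input buffer (pr->pdc) is not built here.  It is filled in
 * by arch_acpi_processor_init_pdc(), which acpi_processor_start() calls
 * right before acpi_processor_set_pdc(), so a NULL pr->pdc simply means the
 * architecture has no capabilities to declare.
 */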

/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = seq->private;

        if (!pr)
                goto end;

        seq_printf(seq, "processor id:            %d\n"
                   "acpi id:                 %d\n"
                   "bus mastering control:   %s\n"
                   "power management:        %s\n"
                   "throttling control:      %s\n"
                   "limit interface:         %s\n",
                   pr->id,
                   pr->acpi_id,
                   pr->flags.bm_control ? "yes" : "no",
                   pr->flags.power ? "yes" : "no",
                   pr->flags.throttling ? "yes" : "no",
                   pr->flags.limit ? "yes" : "no");

end:
        return 0;
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_info_seq_show,
                           PDE(inode)->data);
}

static int acpi_processor_add_fs(struct acpi_device *device)
{
        struct proc_dir_entry *entry = NULL;

        if (!acpi_device_dir(device)) {
                acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
                                                     acpi_processor_dir);
                if (!acpi_device_dir(device))
                        return -ENODEV;
        }
        acpi_device_dir(device)->owner = THIS_MODULE;

        /* 'info' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
                                  S_IRUGO, acpi_device_dir(device));
        if (!entry)
                return -EIO;
        else {
                entry->proc_fops = &acpi_processor_info_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        /* 'throttling' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
                                  S_IFREG | S_IRUGO | S_IWUSR,
                                  acpi_device_dir(device));
        if (!entry)
                return -EIO;
        else {
                entry->proc_fops = &acpi_processor_throttling_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        /* 'limit' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
                                  S_IFREG | S_IRUGO | S_IWUSR,
                                  acpi_device_dir(device));
        if (!entry)
                return -EIO;
        else {
                entry->proc_fops = &acpi_processor_limit_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        return 0;
}

static int acpi_processor_remove_fs(struct acpi_device *device)
{
        if (acpi_device_dir(device)) {
                remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
                                  acpi_device_dir(device));
                remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
                                  acpi_device_dir(device));
                remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
                                  acpi_device_dir(device));
                remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
                acpi_device_dir(device) = NULL;
        }

        return 0;
}

/* Use the ACPI ID from the MADT to map processor objects to CPUs on SMP */
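/*
 * Mapping outline: the ACPI processor ID (from the Processor object or _UID)
 * is matched against MADT Local APIC/SAPIC entries, or against the subtable
 * returned by the per-processor _MAT method, and the resulting (S)APIC ID is
 * then looked up in the architecture's cpu-to-apicid table to obtain the
 * logical CPU number.
 */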

#ifndef CONFIG_SMP
static int get_cpu_id(acpi_handle handle, u32 acpi_id) { return -1; }
#else

static struct acpi_table_madt *madt;

static int map_lapic_id(struct acpi_subtable_header *entry,
                        u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_apic *lapic =
                (struct acpi_madt_local_apic *)entry;
        if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
            lapic->processor_id == acpi_id) {
                *apic_id = lapic->id;
                return 1;
        }
        return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
                         u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_sapic *lsapic =
                (struct acpi_madt_local_sapic *)entry;
        /* Only check enabled APICs */
        if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
                /* First check against id */
                if (lsapic->processor_id == acpi_id) {
                        /* A local SAPIC ID combines the 8-bit id and 8-bit eid */
                        *apic_id = (lsapic->id << 8) | lsapic->eid;
                        return 1;
                /* Check against optional uid */
                } else if (entry->length >= 16 &&
                           lsapic->uid == acpi_id) {
                        *apic_id = lsapic->uid;
                        return 1;
                }
        }
        return 0;
}

#ifdef CONFIG_IA64
#define arch_cpu_to_apicid      ia64_cpu_to_sapicid
#else
#define arch_cpu_to_apicid      x86_cpu_to_apicid
#endif

static int map_madt_entry(u32 acpi_id)
{
        unsigned long madt_end, entry;
        int apic_id = -1;

        if (!madt)
                return apic_id;

        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;

        /* Parse all entries looking for a match. */

        entry += sizeof(struct acpi_table_madt);
        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                        if (map_lapic_id(header, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (map_lsapic_id(header, acpi_id, &apic_id))
                                break;
                }
                entry += header->length;
        }
        return apic_id;
}

static int map_mat_entry(acpi_handle handle, u32 acpi_id)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
        int apic_id = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;

        if (!buffer.length || !buffer.pointer)
                goto exit;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(struct acpi_subtable_header)) {
                goto exit;
        }

        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, acpi_id, &apic_id);
        }

exit:
        if (buffer.pointer)
                kfree(buffer.pointer);
        return apic_id;
}

static int get_cpu_id(acpi_handle handle, u32 acpi_id)
{
        int i;
        int apic_id = -1;

        apic_id = map_mat_entry(handle, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(acpi_id);
        if (apic_id == -1)
                return apic_id;

        for (i = 0; i < NR_CPUS; ++i) {
                if (arch_cpu_to_apicid[i] == apic_id)
                        return i;
        }
        return -1;
}
#endif
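
/*
 * Illustrative example (hypothetical firmware values): a Processor object
 * with proc_id 2 whose enabled MADT Local APIC entry reports id 4 makes
 * map_madt_entry() return 4, and get_cpu_id() then returns the index i for
 * which arch_cpu_to_apicid[i] == 4, i.e. the matching logical CPU.
 */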

/* --------------------------------------------------------------------------
                                 Driver Interface
   -------------------------------------------------------------------------- */

static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
{
        acpi_status status = 0;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        int cpu_index;
        static int cpu0_initialized;

        if (!pr)
                return -EINVAL;

        if (num_online_cpus() > 1)
                errata.smp = TRUE;

        acpi_processor_errata(pr);

        /*
         * Check to see if we have bus mastering arbitration control.  This
         * is required for proper C3 usage (to maintain cache coherency).
         */
        if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
                pr->flags.bm_control = 1;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Bus mastering arbitration control present\n"));
        } else
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "No bus mastering arbitration control\n"));

        /* Check if it is a Device with HID and UID */
        if (has_uid) {
                unsigned long value;
                status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
                                               NULL, &value);
                if (ACPI_FAILURE(status)) {
                        printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
                        return -ENODEV;
                }
                pr->acpi_id = value;
        } else {
                /*
                 * Evaluate the processor object.  Note that it is common on
                 * SMP to have the first (boot) processor with a valid PBLK
                 * address while all others have a NULL address.
                 */
                status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status)) {
                        printk(KERN_ERR PREFIX "Evaluating processor object\n");
                        return -ENODEV;
                }

                /*
                 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
                 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
                 *      arch/xxx/acpi.c
                 */
                pr->acpi_id = object.processor.proc_id;
        }
        cpu_index = get_cpu_id(pr->handle, pr->acpi_id);

        /* Handle UP system running SMP kernel, with no LAPIC in MADT */
        if (!cpu0_initialized && (cpu_index == -1) &&
            (num_online_cpus() == 1)) {
                cpu_index = 0;
        }

        cpu0_initialized = 1;

        pr->id = cpu_index;

        /*
         * Extra Processor objects may be enumerated on MP systems with
         * less than the max # of CPUs.  They should be ignored _iff
         * they are physically not present.
         */
        if (pr->id == -1) {
                if (ACPI_FAILURE
                    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
                        return -ENODEV;
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
                          pr->acpi_id));

        if (!object.processor.pblk_address)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
        else if (object.processor.pblk_length != 6)
                printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
                       object.processor.pblk_length);
        else {
                pr->throttling.address = object.processor.pblk_address;
                pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
                pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

                pr->pblk = object.processor.pblk_address;

                /*
                 * We don't care about error returns - we just try to mark
                 * these reserved so that nobody else is confused into thinking
                 * that this region might be unused..
                 *
                 * (In particular, allocating the IO range for Cardbus)
                 */
                request_region(pr->throttling.address, 6, "ACPI CPU throttle");
        }

#ifdef CONFIG_CPU_FREQ
        acpi_processor_ppc_has_changed(pr);
#endif
        acpi_processor_get_throttling_info(pr);
        acpi_processor_get_limit_info(pr);

        return 0;
}
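
/*
 * Ordering note: acpi_processor_get_info() only gathers static data (IDs,
 * P_BLK throttling parameters, limits).  C-state setup is deferred to
 * acpi_processor_power_init(), which acpi_processor_start() below calls
 * after the _PDC handshake.
 */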

static void *processor_device_array[NR_CPUS];

static int __cpuinit acpi_processor_start(struct acpi_device *device)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_processor *pr;

        pr = acpi_driver_data(device);

        result = acpi_processor_get_info(pr, device->flags.unique_id);
        if (result) {
                /* Processor is physically not present */
                return 0;
        }

        BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0));

        /*
         * Buggy BIOS check
         * ACPI id of processors can be reported wrongly by the BIOS.
         * Don't trust it blindly
         */
        if (processor_device_array[pr->id] != NULL &&
            processor_device_array[pr->id] != device) {
                printk(KERN_WARNING "BIOS reported wrong ACPI id "
                       "for the processor\n");
                return -ENODEV;
        }
        processor_device_array[pr->id] = device;

        processors[pr->id] = pr;

        result = acpi_processor_add_fs(device);
        if (result)
                goto end;

        status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
                                             acpi_processor_notify, pr);

        /* _PDC call should be done before doing anything else (if reqd.). */
        arch_acpi_processor_init_pdc(pr);
        acpi_processor_set_pdc(pr);

        acpi_processor_power_init(pr, device);

        if (pr->flags.throttling) {
                printk(KERN_INFO PREFIX "%s [%s] (supports",
                       acpi_device_name(device), acpi_device_bid(device));
                printk(" %d throttling states", pr->throttling.state_count);
                printk(")\n");
        }

end:
        return result;
}
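
/*
 * acpi_processor_notify() receives the device notifications registered above
 * with ACPI_DEVICE_NOTIFY: 0x80 (performance/_PPC change), 0x81 (power/_CST
 * change) and 0x82 (throttling change), matching the defines at the top of
 * this file.
 */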

static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
        struct acpi_processor *pr = data;
        struct acpi_device *device = NULL;

        if (!pr)
                return;

        if (acpi_bus_get_device(pr->handle, &device))
                return;

        switch (event) {
        case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
                acpi_processor_ppc_has_changed(pr);
                acpi_bus_generate_proc_event(device, event,
                                             pr->performance_platform_limit);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                device->dev.bus_id, event,
                                                pr->performance_platform_limit);
                break;
        case ACPI_PROCESSOR_NOTIFY_POWER:
                acpi_processor_cst_has_changed(pr);
                acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                device->dev.bus_id, event, 0);
                break;
        case ACPI_PROCESSOR_NOTIFY_THROTTLING:
                acpi_processor_tstate_has_changed(pr);
                acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                device->dev.bus_id, event, 0);
                break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Unsupported event [0x%x]\n", event));
                break;
        }

        return;
}

static int acpi_cpu_soft_notify(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct acpi_processor *pr = processors[cpu];

        if (action == CPU_ONLINE && pr) {
                acpi_processor_ppc_has_changed(pr);
                acpi_processor_cst_has_changed(pr);
                acpi_processor_tstate_has_changed(pr);
        }
        return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier = {
        .notifier_call = acpi_cpu_soft_notify,
};

static int acpi_processor_add(struct acpi_device *device)
{
        struct acpi_processor *pr = NULL;

        if (!device)
                return -EINVAL;

        pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
        if (!pr)
                return -ENOMEM;

        pr->handle = device->handle;
        strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
        acpi_driver_data(device) = pr;

        return 0;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
        acpi_status status = AE_OK;
        struct acpi_processor *pr = NULL;

        if (!device || !acpi_driver_data(device))
                return -EINVAL;

        pr = acpi_driver_data(device);

        if (pr->id >= NR_CPUS) {
                kfree(pr);
                return 0;
        }

        if (type == ACPI_BUS_REMOVAL_EJECT) {
                if (acpi_processor_handle_eject(pr))
                        return -EINVAL;
        }

        acpi_processor_power_exit(pr, device);

        status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
                                            acpi_processor_notify);

        acpi_processor_remove_fs(device);

        processors[pr->id] = NULL;

        kfree(pr);

        return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *                      ACPI processor hotplug support                      *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle);

static int is_processor_present(acpi_handle handle)
{
        acpi_status status;
        unsigned long sta = 0;

        status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Processor Device is not present"));
                return 0;
        }
        return 1;
}

static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
        acpi_handle phandle;
        struct acpi_device *pdev;
        struct acpi_processor *pr;

        if (acpi_get_parent(handle, &phandle)) {
                return -ENODEV;
        }

        if (acpi_bus_get_device(phandle, &pdev)) {
                return -ENODEV;
        }

        if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
                return -ENODEV;
        }

        acpi_bus_start(*device);

        pr = acpi_driver_data(*device);
        if (!pr)
                return -ENODEV;

        if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
                kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
        }
        return 0;
}

static void
acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
{
        struct acpi_processor *pr;
        struct acpi_device *device = NULL;
        int result;

        switch (event) {
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
                printk("Processor driver received %s event\n",
                       (event == ACPI_NOTIFY_BUS_CHECK) ?
                       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");

                if (!is_processor_present(handle))
                        break;

                if (acpi_bus_get_device(handle, &device)) {
                        result = acpi_processor_device_add(handle, &device);
                        if (result)
                                printk(KERN_ERR PREFIX
                                       "Unable to add the device\n");
                        break;
                }

                pr = acpi_driver_data(device);
                if (!pr) {
                        printk(KERN_ERR PREFIX "Driver data is NULL\n");
                        break;
                }

                if (pr->id >= 0 && (pr->id < NR_CPUS)) {
                        kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
                        break;
                }

                result = acpi_processor_start(device);
                if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) {
                        kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
                } else {
                        printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
                               acpi_device_bid(device));
                }
                break;
        case ACPI_NOTIFY_EJECT_REQUEST:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

                if (acpi_bus_get_device(handle, &device)) {
                        printk(KERN_ERR PREFIX
                               "Device doesn't exist, dropping EJECT\n");
                        break;
                }
                pr = acpi_driver_data(device);
                if (!pr) {
                        printk(KERN_ERR PREFIX
                               "Driver data is NULL, dropping EJECT\n");
                        return;
                }

                if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
                        kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
                break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Unsupported event [0x%x]\n", event));
                break;
        }

        return;
}

static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
                            u32 lvl, void *context, void **rv)
{
        acpi_status status;
        int *action = context;
        acpi_object_type type = 0;

        status = acpi_get_type(handle, &type);
        if (ACPI_FAILURE(status))
                return (AE_OK);

        if (type != ACPI_TYPE_PROCESSOR)
                return (AE_OK);

        switch (*action) {
        case INSTALL_NOTIFY_HANDLER:
                acpi_install_notify_handler(handle,
                                            ACPI_SYSTEM_NOTIFY,
                                            acpi_processor_hotplug_notify,
                                            NULL);
                break;
        case UNINSTALL_NOTIFY_HANDLER:
                acpi_remove_notify_handler(handle,
                                           ACPI_SYSTEM_NOTIFY,
                                           acpi_processor_hotplug_notify);
                break;
        default:
                break;
        }

        return (AE_OK);
}

static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
        if (!is_processor_present(handle)) {
                return AE_ERROR;
        }

        if (acpi_map_lsapic(handle, p_cpu))
                return AE_ERROR;

        if (arch_register_cpu(*p_cpu)) {
                acpi_unmap_lsapic(*p_cpu);
                return AE_ERROR;
        }

        return AE_OK;
}
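
/*
 * acpi_processor_handle_eject() below is the inverse of the hot-add path
 * above: it refuses to eject a CPU that is still online, then unregisters
 * the CPU from the architecture and unmaps its LSAPIC.
 */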

static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
        if (cpu_online(pr->id)) {
                return (-EINVAL);
        }
        arch_unregister_cpu(pr->id);
        acpi_unmap_lsapic(pr->id);
        return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
        return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
        return (-EINVAL);
}
#endif

static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        int action = INSTALL_NOTIFY_HANDLER;
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
                            ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            processor_walk_namespace_cb, &action, NULL);
#endif
        register_hotcpu_notifier(&acpi_cpu_notifier);
}

static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        int action = UNINSTALL_NOTIFY_HANDLER;
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
                            ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            processor_walk_namespace_cb, &action, NULL);
#endif
        unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI but needs symbols from this driver.
 */

static int __init acpi_processor_init(void)
{
        int result = 0;

        memset(&processors, 0, sizeof(processors));
        memset(&errata, 0, sizeof(errata));

#ifdef CONFIG_SMP
        if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
                                (struct acpi_table_header **)&madt)))
                madt = NULL;
#endif

        acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
        if (!acpi_processor_dir)
                return -ENOMEM;
        acpi_processor_dir->owner = THIS_MODULE;

        result = acpi_bus_register_driver(&acpi_processor_driver);
        if (result < 0) {
                remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
                return result;
        }

        acpi_processor_install_hotplug_notify();

        acpi_thermal_cpufreq_init();

        acpi_processor_ppc_init();

        return 0;
}
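
/*
 * Teardown mirrors acpi_processor_init() in reverse: ppc and thermal-cpufreq
 * hooks first, then the hotplug notifiers, the bus driver, and finally the
 * /proc directory.
 */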

static void __exit acpi_processor_exit(void)
{
        acpi_processor_ppc_exit();

        acpi_thermal_cpufreq_exit();

        acpi_processor_uninstall_hotplug_notify();

        acpi_bus_unregister_driver(&acpi_processor_driver);

        remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

        return;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);

MODULE_ALIAS("processor");