/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

/* Arch hooks */

int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}

void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
	return;
}

void __attribute__ ((weak))
arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}

static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static void msix_flush_writes(unsigned int irq)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			__msi_set_enable(entry->dev, entry->msi_attrib.pos,
					 !flag);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	entry->msg = *msg;
}

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bits(irq, 1, 1);
	msix_flush_writes(irq);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bits(irq, 1, 0);
	msix_flush_writes(irq);
}

static int msi_free_irqs(struct pci_dev* dev);

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit)
		msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
	if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
	}

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup of
 * entry zero with the new MSI irq; non-zero indicates otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits	*/
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);	/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0) {
				avail++;
			}
		}

		msi_free_irqs(dev);

		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested ?
 * @type: are we checking for MSI or MSI-X ?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq when its software driver requests MSI mode to be enabled on the
 * hardware device function. A return of zero indicates the successful setup
 * of entry zero with the new MSI irq; non-zero indicates otherwise.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int status;

	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested for MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev, "can't enable MSI "
			 "(MSI-X already enabled)\n");
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi);

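/*
 * Illustrative driver-side use of pci_enable_msi() (a sketch only, not part
 * of this driver core file; "foo", "foo_handler" and "foo_probe" are
 * hypothetical names):
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_msi(pdev);
 *		if (err)
 *			return err;
 *
 *		err = request_irq(pdev->irq, foo_handler, 0, "foo", foo);
 *		if (err)
 *			pci_disable_msi(pdev);
 *		return err;
 *	}
 *
 * pci_enable_msi() updates dev->irq to the allocated MSI irq, so the handler
 * is requested on pdev->irq. On teardown the driver is expected to
 * free_irq(pdev->irq, foo) before calling pci_disable_msi().
 */
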
void pci_msi_shutdown(struct pci_dev* dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Return the device to its reset state with MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		msi_set_mask_bits(dev->irq, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}

void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);

	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	msi_free_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

static int msi_free_irqs(struct pci_dev* dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested irqs when its software driver requests MSI-X mode
 * to be enabled on the hardware device function. A return of zero
 * indicates the successful configuration of the MSI-X capability structure
 * with the newly allocated MSI-X irqs. A return of < 0 indicates a failure,
 * while a return of > 0 indicates that the request exceeds the number of
 * irqs available; the driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X "
			 "(MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);

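/*
 * Illustrative driver-side use of pci_enable_msix(), including the retry
 * convention for a positive return value described above (a sketch only;
 * "foo_setup_msix" is a hypothetical helper):
 *
 *	static int foo_setup_msix(struct pci_dev *pdev,
 *				  struct msix_entry *entries, int nvec)
 *	{
 *		int i, err;
 *
 *		for (i = 0; i < nvec; i++)
 *			entries[i].entry = i;
 *
 *		err = pci_enable_msix(pdev, entries, nvec);
 *		if (err > 0)
 *			err = pci_enable_msix(pdev, entries, err);
 *		return err;
 *	}
 *
 * On success, entries[i].vector holds the irq number to hand to
 * request_irq() for each requested MSI-X vector.
 */
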
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}

void pci_msix_shutdown(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

void pci_disable_msix(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is hot-removed.
 * All MSI/MSI-X irqs previously allocated for this device function are
 * reclaimed to the unused state and may be used again later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}