/*
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc *msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t *msi_cachep;

static int pci_msi_enable = 1;

static struct msi_ops *msi_ops;

int msi_register(struct msi_ops *ops)
{
	msi_ops = ops;
	return 0;
}
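/*
 * Illustrative sketch (not part of this file): architecture code supplies
 * the setup/teardown/target callbacks used below and hands them over with
 * msi_register(), typically from its msi_arch_init() implementation.  The
 * arch_msi_* names are hypothetical placeholders:
 *
 *	static struct msi_ops arch_msi_ops = {
 *		.needs_64bit_address	= 0,
 *		.setup			= arch_msi_setup,
 *		.teardown		= arch_msi_teardown,
 *		.target			= arch_msi_target,
 *	};
 *
 *	int msi_arch_init(void)
 *	{
 *		return msi_register(&arch_msi_ops);
 *	}
 */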
static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}
static void msi_set_mask_bit(unsigned int irq, int flag)
{
	struct msi_desc *entry;

	entry = msi_desc[irq];
	if (!entry || !entry->dev || !entry->mask_base)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;
		u32 mask_bits;

		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~(1);
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	}
}
static void read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;

		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	}
}
static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;

		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	}
}
#ifdef CONFIG_SMP
static void set_msi_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	struct msi_msg msg;

	entry = msi_desc[irq];
	if (!entry || !entry->dev)
		return;

	read_msi_msg(entry, &msg);
	msi_ops->target(irq, cpu_mask, &msg);
	write_msi_msg(entry, &msg);
	set_native_irq_info(irq, cpu_mask);
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */
static void mask_MSI_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
}

static void unmask_MSI_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
}
static unsigned int startup_msi_irq_wo_maskbit(unsigned int irq)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[irq];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}

static unsigned int startup_msi_irq_w_maskbit(unsigned int irq)
{
	startup_msi_irq_wo_maskbit(irq);
	unmask_MSI_irq(irq);
	return 0;	/* never anything pending */
}
static void shutdown_msi_irq(unsigned int irq)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[irq];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}
static void end_msi_irq_wo_maskbit(unsigned int irq)
{
	move_native_irq(irq);
}

static void end_msi_irq_w_maskbit(unsigned int irq)
{
	move_native_irq(irq);
	unmask_MSI_irq(irq);
}

static void do_nothing(unsigned int irq)
{
}
/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 */
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};
/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};
/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= do_nothing,
	.disable	= do_nothing,
	.end		= end_msi_irq_wo_maskbit,
	.set_affinity	= set_msi_affinity
};
static int msi_free_irq(struct pci_dev *dev, int irq);

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	status = msi_arch_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING
		       "PCI: MSI arch init failed. MSI disabled.\n");
		return status;
	}

	if (!msi_ops) {
		printk(KERN_WARNING
		       "PCI: MSI ops not registered. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	return status;
}
static struct msi_desc *alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->link.tail = entry->link.head = 0;	/* single message */

	return entry;
}
static void attach_msi_entry(struct msi_desc *entry, int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[irq] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}
static int create_msi_irq(struct hw_interrupt_type *handler)
{
	struct msi_desc *entry;
	int irq;

	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	irq = create_irq();
	if (irq < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}

	set_irq_chip(irq, handler);
	set_irq_data(irq, entry);

	return irq;
}
static void destroy_msi_irq(unsigned int irq)
{
	struct msi_desc *entry;

	entry = get_irq_data(irq);
	set_irq_chip(irq, NULL);
	set_irq_data(irq, NULL);
	destroy_irq(irq);
	kmem_cache_free(msi_cachep, entry);
}
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 1;
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 1;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);	/* disable intx */
	}
}
void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 0;
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 0;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);	/* enable intx */
	}
}
static int msi_lookup_irq(struct pci_dev *dev, int type)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (irq = 0; irq < NR_IRQS; irq++) {
		if (!msi_desc[irq] || msi_desc[irq]->dev != dev ||
			msi_desc[irq]->msi_attrib.type != type ||
			msi_desc[irq]->msi_attrib.default_irq != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* This pre-assigned MSI irq for this device
		   already exists. Override dev->irq with this irq */
		dev->irq = irq;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}
void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;
}
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
			     GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);

	return 0;
}
void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}
int pci_save_msix_state(struct pci_dev *dev)
{
	int pos, temp;
	int irq, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
			     GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	temp = dev->irq;
	if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
		kfree(save_state);
		return -EINVAL;
	}

	irq = head = dev->irq;
	while (head != tail) {
		struct msi_desc *entry;

		entry = msi_desc[irq];
		read_msi_msg(entry, &entry->msg_save);

		tail = msi_desc[irq]->link.tail;
		irq = tail;
	}
	dev->irq = temp;

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);

	return 0;
}
void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos, temp;
	int irq, head, tail = 0;
	struct msi_desc *entry;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;
	if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX))
		return;
	irq = head = dev->irq;
	while (head != tail) {
		entry = msi_desc[irq];
		write_msi_msg(entry, &entry->msg_save);

		tail = msi_desc[irq]->link.tail;
		irq = tail;
	}
	dev->irq = temp;

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
	int status;
	int pos;
	u16 control;
	struct msi_msg msg;

	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* Configure MSI capability structure */
	status = msi_ops->setup(dev, dev->irq, &msg);
	if (status < 0)
		return status;

	write_msi_msg(entry, &msg);
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}

	return 0;
}
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI irq, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates the successful
 * setup of an entry zero with the new MSI irq; a non-zero return
 * indicates a failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, irq;
	u16 control;
	struct hw_interrupt_type *handler;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	handler = &msi_irq_wo_maskbit_type;
	if (is_mask_bit_support(control))
		handler = &msi_irq_w_maskbit_type;

	irq = create_msi_irq(handler);
	if (irq < 0)
		return irq;

	entry = get_irq_data(irq);
	entry->link.head = irq;
	entry->link.tail = irq;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;			/* Mark it not active */
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Configure MSI capability structure */
	status = msi_register_init(dev, entry);
	if (status != 0) {
		dev->irq = entry->msi_attrib.default_irq;
		destroy_msi_irq(irq);
		return status;
	}

	attach_msi_entry(entry, irq);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	dev->irq = irq;
	return 0;
}
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with the
 * requested number of MSI-X irqs. A return of zero indicates the
 * successful setup of the requested MSI-X entries with allocated irqs;
 * a non-zero return indicates a failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	struct msi_msg msg;
	int status;
	int irq, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		irq = create_msi_irq(&msix_irq_type);
		if (irq < 0)
			break;

		entry = get_irq_data(irq);
		j = entries[i].entry;
		entries[i].vector = irq;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.state = 0;		/* Mark it not active */
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = irq;
			entry->link.tail = irq;
			head = entry;
			tail = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = irq;
			head->link.head = irq;
			tail = entry;
		}
		temp = irq;

		/* Configure MSI-X capability structure */
		status = msi_ops->setup(dev, irq, &msg);
		if (status < 0) {
			destroy_msi_irq(irq);
			break;
		}

		write_msi_msg(entry, &msg);
		attach_msi_entry(entry, irq);
	}
	if (i != nvec) {
		int avail = i - 1;

		i--;
		for (; i >= 0; i--) {
			irq = (entries + i)->vector;
			msi_free_irq(dev, irq);
			(entries + i)->vector = 0;
		}
		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail <= 0)
			avail = -EBUSY;
		return avail;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}
/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * MSI must be globally enabled and supported by the device and its root
 * bus. But, the root bus is not easy to find since some architectures
 * have virtual busses on top of the PCI hierarchy (for instance the
 * hypertransport bus), while the actual bus where MSI must be supported
 * is below. So we test the MSI flag on all parent busses and assume
 * that no quirk will ever set the NO_MSI flag on a non-root bus.
 **/
static int pci_msi_supported(struct pci_dev *dev)
{
	struct pci_bus *bus;

	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/* check MSI flags of all parent busses */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	return 0;
}
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with
 * a single MSI irq upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates the successful setup of an entry zero with the new MSI
 * irq; a non-zero return indicates a failure.
 **/
int pci_enable_msi(struct pci_dev *dev)
{
	int pos, temp, status;
	u16 control;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	temp = dev->irq;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!is_64bit_address(control) && msi_ops->needs_64bit_address)
		return -EINVAL;

	WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSI));

	/* Check whether driver already requested MSI-X irqs */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
		       "Device already has MSI-X irq assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
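/*
 * Illustrative usage sketch (not part of this file): a PCI driver would
 * typically try MSI first and fall back to the pin-based (INTx) interrupt
 * when pci_enable_msi() fails; dev->irq then carries whichever irq is in
 * effect.  foo_interrupt and foo_dev below are hypothetical driver names.
 *
 *	if (pci_enable_msi(pdev))
 *		printk(KERN_INFO "foo: MSI unavailable, using INTx\n");
 *
 *	if (request_irq(pdev->irq, foo_interrupt, 0, "foo", foo_dev)) {
 *		pci_disable_msi(pdev);
 *		return -EBUSY;
 *	}
 */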
void pci_disable_msi(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, default_irq;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI irq %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		default_irq = entry->msi_attrib.default_irq;
		spin_unlock_irqrestore(&msi_lock, flags);
		msi_free_irq(dev, dev->irq);

		/* Restore dev->irq to its default pin-assertion irq */
		dev->irq = default_irq;
	}
}
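/*
 * Illustrative teardown sketch (not part of this file): as the warning
 * above implies, the driver is expected to release the irq before
 * disabling MSI, after which dev->irq refers to the pin-based irq again.
 * foo_dev is a hypothetical driver cookie.
 *
 *	free_irq(pdev->irq, foo_dev);
 *	pci_disable_msi(pdev);
 */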
static int msi_free_irq(struct pci_dev *dev, int irq)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	msi_ops->teardown(irq);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[irq];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;

	msi_desc[irq] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	destroy_msi_irq(irq);

	if (type == PCI_CAP_ID_MSIX) {
		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == irq)
			iounmap(base);
	}

	return 0;
}
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested irqs upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of irqs available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j, temp;
	u16 control;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;
	WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSIX));

	/* Check whether driver already requested an MSI irq */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI irq assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
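/*
 * Illustrative usage sketch (not part of this file): a driver fills a
 * msix_entry array, asks for a number of vectors and, on a positive
 * return, may retry with the smaller count reported as available before
 * calling request_irq() on each returned vector.  The names nvec and
 * foo_msix are hypothetical.
 *
 *	struct msix_entry foo_msix[4];
 *	int i, nvec = 4, err;
 *
 *	for (i = 0; i < nvec; i++)
 *		foo_msix[i].entry = i;
 *
 *	err = pci_enable_msix(pdev, foo_msix, nvec);
 *	if (err > 0)
 *		err = pci_enable_msix(pdev, foo_msix, err);
 *	if (err)
 *		return err;
 *	(then request_irq() is called on each foo_msix[i].vector)
 */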
void pci_disable_msix(struct pci_dev *dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	temp = dev->irq;
	if (!msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
		int state, irq, head, tail = 0, warning = 0;
		unsigned long flags;

		irq = head = dev->irq;
		dev->irq = temp;			/* Restore pin IRQ */
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[irq]->msi_attrib.state;
			tail = msi_desc[irq]->link.tail;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (irq != head)	/* Release MSI-X irq */
				msi_free_irq(dev, irq);
			irq = tail;
		}
		msi_free_irq(dev, irq);
		if (warning) {
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X irqs\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
	}
}
/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function. All MSI/MSI-X
 * irqs previously allocated for this device function are reclaimed to
 * the unused state, so they may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI irq %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI irq assigned to this device */
			msi_free_irq(dev, dev->irq);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
		int irq, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		irq = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[irq]->msi_attrib.state;
			tail = msi_desc[irq]->link.tail;
			base = msi_desc[irq]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (irq != head)	/* Release MSI-X irq */
				msi_free_irq(dev, irq);
			irq = tail;
		}
		msi_free_irq(dev, irq);
		if (warning) {
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X irqs\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);