/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
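/*
 * Illustrative usage sketch (assumptions: "my_handler", "my_dev" and
 * "remote_port" are hypothetical names; a real frontend reads the port
 * from xenstore).  A split-driver frontend typically does:
 *
 *	int irq = bind_evtchn_to_irqhandler(remote_port, my_handler,
 *					    0, "my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	notify_remote_via_irq(irq);		(kick the backend)
 *	...
 *	unbind_from_irqhandler(irq, my_dev);	(teardown)
 *
 * All of these helpers are defined later in this file.
 */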
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/ptrace.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
static struct irq_info *irq_info;
static int *pirq_to_irq;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)
static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}
static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}
static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}
static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
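/*
 * Illustrative note: active_evtchns() is a three-way AND per word of
 * the bitmaps.  For word idx, an event is delivered here iff it is
 * pending in the shared page, routed to this cpu by cpu_evtchn_mask(),
 * and not globally masked.  E.g. pending 0b0110, this cpu's mask
 * 0b0010 and global evtchn_mask 0b0100 yield 0b0010: only that one
 * port is handled on this cpu.
 */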
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));

	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
	int i;
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}

	for_each_possible_cpu(i)
		memset(cpu_evtchn_mask(i),
		       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
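/*
 * Illustrative sketch ("ring_push_request" is a hypothetical helper,
 * not part of this file): a driver that has bound "irq" to an
 * inter-domain channel signals its peer after queuing work on a
 * shared ring:
 *
 *	ring_push_request(ring, req);
 *	notify_remote_via_irq(irq);
 */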
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
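/*
 * Worked example: with BITS_PER_LONG == 64, unmasking port 70 that is
 * still pending sets bit 70 / 64 == 1 of evtchn_pending_sel and then
 * evtchn_upcall_pending, re-creating exactly the state the hypervisor
 * would have left behind had the event arrived while unmasked.
 */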
static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}
static int find_unbound_pirq(int type)
{
	int rc, i;
	struct physdev_get_free_pirq op_get_free_pirq;
	op_get_free_pirq.type = type;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
	if (!rc)
		return op_get_free_pirq.pirq;

	for (i = 0; i < nr_irqs; i++) {
		if (pirq_to_irq[i] < 0)
			return i;
	}
	return -1;
}
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it. */
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, -1);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}
static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}
static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}
static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}
static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}
static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}
static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}
static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}
int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}
/* xen_map_pirq_gsi might allocate irqs from the top down; as a
 * consequence, don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument: in that case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up.  Return an existing irq if we've already got one for
 * the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
			pirq > nr_irqs ? "pirq" : "",
			gsi > nr_irqs ? "gsi" : "");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed);
	 * the !xen_initial_domain() check makes such guests take the
	 * identity-mapped branch here. */
	if (identity_mapped_irq(gsi) || !xen_initial_domain()) {
		irq = gsi;
		irq_alloc_desc_at(irq, -1);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		irq_free_desc(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
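/*
 * Illustrative sketch (treat the name string as an example): dom0 code
 * that wants a Linux irq for a legacy interrupt line can call
 *
 *	int irq = xen_allocate_pirq(gsi, 1, "ioapic");
 *
 * where the second argument marks the line shareable.  Because gsi is
 * passed as both pirq and gsi, the returned irq equals the gsi on the
 * identity-mapped path described above.
 */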
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#include "../pci/msi.h"

void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
{
	spin_lock(&irq_mapping_update_lock);

	if (alloc & XEN_ALLOC_IRQ) {
		*irq = find_unbound_irq();
		if (*irq == -1)
			goto out;
	}

	if (alloc & XEN_ALLOC_PIRQ) {
		*pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
		if (*pirq == -1)
			goto out;
	}

	set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
	pirq_to_irq[*pirq] = *irq;

out:
	spin_unlock(&irq_mapping_update_lock);
}
int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
{
	int irq = -1;
	struct physdev_map_pirq map_irq;
	int rc;
	int pos;
	u32 table_offset, bir;

	memset(&map_irq, 0, sizeof(map_irq));
	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_MSI;
	map_irq.index = -1;
	map_irq.pirq = -1;
	map_irq.bus = dev->bus->number;
	map_irq.devfn = dev->devfn;

	if (type == PCI_CAP_ID_MSIX) {
		pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);

		pci_read_config_dword(dev, msix_table_offset_reg(pos),
					&table_offset);
		bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);

		map_irq.table_base = pci_resource_start(dev, bir);
		map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
	}

	spin_lock(&irq_mapping_update_lock);

	irq = find_unbound_irq();

	if (irq == -1)
		goto out;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);

		irq_free_desc(irq);

		irq = -1;
		goto out;
	}
	irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
			handle_level_irq,
			(type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi");

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = DOMID_SELF;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
		pirq_to_irq[info->u.pirq.pirq] = -1;
	}
	irq_info[irq] = mk_unbound_info();

	irq_free_desc(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}
int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int xen_irq_from_pirq(unsigned pirq)
{
	return pirq_to_irq[pirq];
}
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_fasteoi_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
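/*
 * Illustrative usage sketch (the flags shown are an example; the Xen
 * clocksource code uses a similar call): binding the per-cpu timer
 * VIRQ looks like
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_PERCPU|IRQF_NOBALANCING,
 *				      "timer", NULL);
 *
 * The returned irq is live immediately; unbind_from_irqhandler() both
 * frees the handler and closes the underlying event channel.
 */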
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
			       ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
			       ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
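/*
 * Worked example: on a 64-bit guest with only port 70 pending,
 * evtchn_pending_sel has bit 1 set (70 / 64 == 1), so the outer loop
 * below picks word_idx = 1; the inner loop finds bit_idx = 6 (70 % 64)
 * and computes port = 1 * 64 + 6 = 70, which evtchn_to_irq[] maps to
 * the irq that gets handled.
 */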
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				mask_evtchn(port);
				clear_evtchn(port);

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
		(xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}
static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_masked_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}
static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
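/*
 * Illustrative note: resend_irq_on_evtchn() and retrigger_dynirq()
 * share one trick: atomically mask the channel, mark it pending in the
 * shared page, then (if it was not already masked) let unmask_evtchn()
 * replay the "lost edge" through the vcpu's pending_sel, so the normal
 * upcall path re-delivers the event.
 */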
static void restore_cpu_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;

	for (pirq = 0; pirq < nr_irqs; pirq++) {
		irq = pirq_to_irq[pirq];
		if (irq == -1)
			continue;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		gsi = gsi_from_irq(irq);
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			irq_info[irq] = mk_unbound_info();
			pirq_to_irq[pirq] = -1;
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		startup_pirq(irq);
	}
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}
/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);
/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
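/*
 * Illustrative sketch (the predicate "lock_is_free" is hypothetical,
 * standing in for the real lock-word check): the paravirtualized
 * spinlock slow path sleeps on a dedicated, disabled irq until the
 * lock holder kicks it:
 *
 *	xen_clear_irq_pending(irq);
 *	while (!lock_is_free())
 *		xen_poll_irq(irq);	(blocks in SCHEDOP_poll)
 */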
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;
	struct irq_desc *desc;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	/*
	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
	 * are not handled by the IRQ core.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
			continue;
		if (desc->status & IRQ_DISABLED)
			continue;

		evtchn = evtchn_from_irq(irq);
		if (evtchn == -1)
			continue;

		unmask_evtchn(evtchn);
	}

	restore_cpu_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.eoi		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};
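/*
 * Note: each chip above is paired with a flow handler when an irq is
 * set up earlier in this file: xen_dynamic_chip with handle_fasteoi_irq
 * ("event"), xen_percpu_chip with handle_percpu_irq ("ipi"/"virq"),
 * and xen_pirq_chip with handle_level_irq.
 */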
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
			       " failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
		       "enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	/* We are using nr_irqs as the maximum number of pirq available but
	 * that number is actually chosen by Xen and we don't know exactly
	 * what it is. Be careful choosing high pirq numbers. */
	pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_irqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}