/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
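/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * frontend driver binds an inter-domain event channel it negotiated over
 * xenbus to an irq plus handler, and tears it down again with
 * unbind_from_irqhandler().  The names my_handler/my_dev/evtchn below
 * are hypothetical.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = bind_evtchn_to_irqhandler(evtchn, my_handler,
 *					    0, "my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */
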
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/ptrace.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
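/*
 * Overview of the mapping layers kept in this file: Xen names an event by
 * its port number; evtchn_to_irq[] maps port -> Linux irq, irq_info[] maps
 * irq -> (type, port, bound cpu, type-specific index), and the per-cpu
 * virq_to_irq/ipi_to_irq arrays above give the reverse lookup for the
 * per-cpu event types.
 */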
/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 */
struct irq_info {
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
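/*
 * Note on active_evtchns(): an event in word 'idx' is ready for delivery
 * on this cpu only if all three conditions hold for its bit -- it is
 * pending in the shared page, it is routed to this cpu by the per-cpu
 * bitmap, and it is not globally masked.  Hence the three-way AND above.
 */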
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(cpu_evtchn_mask(i),
		       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
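/*
 * Producer-side sketch (hypothetical ring/irq names): after placing a
 * request or response on a shared ring, one end kicks the other with
 *
 *	notify_remote_via_irq(ring->irq);
 *
 * If the channel has been torn down (e.g. across save/restore) the
 * invalid-evtchn check above quietly turns the kick into a no-op.
 */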
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
static int xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

retry:
	irq = irq_alloc_desc_from(first, -1);

	if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
		printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
		first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
		goto retry;
	}

	if (irq < 0)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	return irq;
}
static int xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		return gsi;

	irq = irq_alloc_desc_at(gsi, -1);
	if (irq < 0)
		panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);

	return irq;
}
static void xen_free_irq(unsigned irq)
{
	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}
static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}
static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}
static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}
static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}
static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}
static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}
static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}
int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}
/* xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument, in that case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up.  Return an existing irq if we've already got one for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
			pirq > nr_irqs ? "pirq" :"",
			gsi > nr_irqs ? "gsi" : "");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	irq = xen_allocate_irq_gsi(gsi);

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#include "../pci/msi.h"

static int find_unbound_pirq(int type)
{
	int rc, i;
	struct physdev_get_free_pirq op_get_free_pirq;
	op_get_free_pirq.type = type;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
	if (!rc)
		return op_get_free_pirq.pirq;

	for (i = 0; i < nr_irqs; i++) {
		if (pirq_to_irq[i] < 0)
			return i;
	}
	return -1;
}

void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
{
	spin_lock(&irq_mapping_update_lock);

	if (alloc & XEN_ALLOC_IRQ) {
		*irq = xen_allocate_irq_dynamic();
		if (*irq == -1)
			goto out;
	}

	if (alloc & XEN_ALLOC_PIRQ) {
		*pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
		if (*pirq == -1)
			goto out;
	}

	set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
	pirq_to_irq[*pirq] = *irq;

out:
	spin_unlock(&irq_mapping_update_lock);
}
int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
{
	int irq = -1;
	struct physdev_map_pirq map_irq;
	int rc;
	int pos;
	u32 table_offset, bir;

	memset(&map_irq, 0, sizeof(map_irq));
	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_MSI;
	map_irq.index = -1;
	map_irq.pirq = -1;
	map_irq.bus = dev->bus->number;
	map_irq.devfn = dev->devfn;

	if (type == PCI_CAP_ID_MSIX) {
		pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);

		pci_read_config_dword(dev, msix_table_offset_reg(pos),
					&table_offset);
		bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);

		map_irq.table_base = pci_resource_start(dev, bir);
		map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
	}

	spin_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();

	if (irq == -1)
		goto out;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);

		xen_free_irq(irq);

		irq = -1;
		goto out;
	}
	irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
			handle_level_irq,
			(type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}
#endif
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = DOMID_SELF;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
		pirq_to_irq[info->u.pirq.pirq] = -1;
	}
	irq_info[irq] = mk_unbound_info();

	xen_free_irq(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}
int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int xen_irq_from_pirq(unsigned pirq)
{
	return pirq_to_irq[pirq];
}
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_fasteoi_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		xen_free_irq(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
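/*
 * For example (an illustrative sketch, not copied from a real caller):
 * per-cpu timer code binds VIRQ_TIMER on each cpu roughly like
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_timer_handler,
 *				      0, "timer", NULL);
 *
 * with my_timer_handler standing in for that driver's handler.  Each cpu
 * gets its own binding, matching the per-cpu nature of VIRQs noted in
 * the header comment.
 */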
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nglobally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocal cpu%d mask:\n ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk(" %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
			       ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
			       ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
/*
 * Search the CPUs pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
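/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, pending port
 * 130 sets bit 2 of evtchn_pending[2] in the shared page and bit 2 of
 * the level-1 selector word.  The scan below recovers it as
 * word_idx = 2, bit_idx = 2, port = word_idx * BITS_PER_LONG + bit_idx
 * = 130, which is then looked up in evtchn_to_irq[].
 */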
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}
void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
		(xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}
static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_masked_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}
static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
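/*
 * Note on retrigger_dynirq(): the event is re-raised in software by
 * setting its pending bit while the channel is masked, then unmasking;
 * unmask_evtchn() notices the pending bit and re-asserts the upcall,
 * which is what makes the "lost edge" recoverable.
 */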
static void restore_cpu_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;

	for (pirq = 0; pirq < nr_irqs; pirq++) {
		irq = pirq_to_irq[pirq];
		if (irq == -1)
			continue;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		gsi = gsi_from_irq(irq);
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			irq_info[irq] = mk_unbound_info();
			pirq_to_irq[pirq] = -1;
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		startup_pirq(irq);
	}
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}
/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);
/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
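/*
 * Intended poll pattern (a sketch; the Xen spinlock slow path does
 * something along these lines -- condition_true() is hypothetical):
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition_true())
 *		xen_poll_irq(irq);
 *
 * The irq is normally kept disabled, so the wakeup arrives as a pending
 * event rather than a delivered interrupt.
 */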
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;
	struct irq_desc *desc;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	/*
	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
	 * are not handled by the IRQ core.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
			continue;
		if (desc->status & IRQ_DISABLED)
			continue;

		evtchn = evtchn_from_irq(irq);
		if (evtchn == -1)
			continue;

		unmask_evtchn(evtchn);
	}

	restore_cpu_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.eoi		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	/* We are using nr_irqs as the maximum number of pirq available but
	 * that number is actually chosen by Xen and we don't know exactly
	 * what it is. Be careful choosing high pirq numbers. */
	pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_irqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}