/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
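
/*
 * Rough usage sketch (illustrative only, not part of this file): a
 * frontend driver holding an event channel granted by its backend
 * would typically go through the irqhandler helpers defined below.
 * The handler name and dev pointer here are hypothetical:
 *
 *	static irqreturn_t frontend_interrupt(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, frontend_interrupt,
 *					0, "frontend", dev);
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */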
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
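
/*
 * The union above is discriminated by 'type': u.pirq is only
 * meaningful when type == IRQT_PIRQ, u.virq for IRQT_VIRQ and u.ipi
 * for IRQT_IPI. The accessors below BUG_ON() a mismatch rather than
 * hand back garbage.
 */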

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;
static int nr_pirqs;

static int *evtchn_to_irq;

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
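
/*
 * An event channel is deliverable on this cpu when three things hold
 * at once: its pending bit is set in the shared-info page, its mask
 * bit is clear there, and our per-cpu tracking mask says it is bound
 * to this cpu. active_evtchns() below computes that triple
 * intersection one bitmap word at a time.
 */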
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/* Note: sizeof(struct cpu_evtchn_s), not sizeof the pointer that
	 * cpu_evtchn_mask(0) returns, so the whole mask really is set. */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
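
/*
 * Masking only sets a bit in the shared-info page, which Xen checks at
 * delivery time; a masked channel can therefore still accumulate a
 * pending bit. That is why unmask_evtchn() below has to resend the
 * "lost edge" by hand.
 */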
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}

/* Callers of this function should make sure that PHYSDEVOP_get_nr_pirqs
 * succeeded, otherwise nr_pirqs won't hold the right value. */
static int find_unbound_pirq(void)
{
	int i;

	for (i = nr_pirqs-1; i >= 0; i--) {
		if (pirq_to_irq[i] < 0)
			return i;
	}
	return -1;
}
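
/*
 * Pick an irq number that is not bound to anything yet. The scan runs
 * from the top of the irq space downwards so as not to collide with
 * the identity-mapped hardware irqs at the bottom (see
 * identity_mapped_irq() below).
 */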
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it.*/
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}

static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}

static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/* xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument, in that case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 */
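/*
 * Illustrative example (hypothetical numbers): for a legacy GSI such
 * as 9, which lies below get_nr_hw_irqs(), xen_map_pirq_gsi(9, 9, 1,
 * "acpi") takes the identity-mapped path and returns irq 9 itself,
 * whereas a GSI above that range gets whatever find_unbound_irq()
 * hands back from the top of the irq space.
 */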
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq > nr_pirqs) || (gsi > nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
		       pirq > nr_pirqs ? "nr_pirqs" : "",
		       gsi > nr_irqs ? "nr_irqs" : "");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
	 * we are using the !xen_initial_domain() to drop in the function. */
	if (identity_mapped_irq(gsi) || !xen_initial_domain()) {
		irq = gsi;
		irq_alloc_desc_at(irq, 0);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	irq_info[irq] = mk_unbound_info();
	rc = 0;

out:
	spin_unlock(&irq_mapping_update_lock);

	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}
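
/*
 * Bind an inter-domain event channel to an irq, allocating a fresh irq
 * and installing the dynamic chip the first time a given evtchn is
 * seen. Calling it again for the same evtchn just returns the existing
 * irq.
 */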
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
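
/*
 * Tear down an irq <-> event-channel binding: close the channel in
 * Xen, drop any per-cpu virq/ipi back-pointer, and return the irq to
 * the unbound pool.
 */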
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();
		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];

	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
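
/*
 * Debug handler: dump each vcpu's upcall state plus the raw pending,
 * mask and pending&~mask bitmaps from the shared-info page, so stuck
 * event channels can be spotted from the console.
 */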
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long flags;
	int i;

	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}

	printk("pending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nmasks:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nunmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
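/*
 * Worked example of the two-level lookup (assuming BITS_PER_LONG is
 * 64): if only port 130 is pending, Xen sets bit 2 of
 * evtchn_pending[2] and bit 2 of this vcpu's evtchn_pending_sel. The
 * outer loop below finds word_idx == 2 via __ffs(), the inner loop
 * finds bit_idx == 2, and the port is recovered as 2 * 64 + 2 == 130.
 */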
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
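
/*
 * After save/restore or migration every event-channel binding is gone.
 * The two helpers below walk each cpu's recorded virqs and ipis,
 * request fresh channels from Xen and re-establish the irq mappings.
 */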
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}
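
/*
 * Three irq_chip flavours back Xen irqs: the dynamic chip for
 * inter-domain event channels, the pirq chip for hardware interrupts
 * routed through Xen, and the percpu chip for virqs and ipis, which
 * must never migrate between cpus.
 */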
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
			       " failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
		       "enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
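
/*
 * Boot-time setup: allocate the mapping tables, ask Xen how many pirqs
 * exist, start with every event channel masked, and pick the delivery
 * mechanism (callback vector for HVM, direct upcall for PV).
 */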
void __init xen_init_IRQ(void)
{
	int i, rc;
	struct physdev_nr_pirqs op_nr_pirqs;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs);
	if (rc < 0) {
		nr_pirqs = nr_irqs;
		if (rc != -ENOSYS)
			printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc);
	} else {
		if (xen_pv_domain() && !xen_initial_domain())
			nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs);
		else
			nr_pirqs = op_nr_pirqs.nr_pirqs;
	}
	pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_pirqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}