/* drivers/xen/events.c */

/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

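/*
 * Illustrative usage (the handler and cookie names here are hypothetical,
 * not part of this file): a frontend driver that has learned an event
 * channel number over xenbus typically binds it with
 * bind_evtchn_to_irqhandler() below, e.g.
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, blkif_interrupt,
 *					0, "blkif", info);
 *
 * and later tears the binding down with unbind_from_irqhandler(irq, info).
 */
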
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
        IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
        enum xen_irq_type type; /* type */
        unsigned short evtchn;  /* event channel */
        unsigned short cpu;     /* cpu bound */

        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct {
                        unsigned short pirq;
                        unsigned short gsi;
                        unsigned char vector;
                        unsigned char flags;
                } pirq;
        } u;
};
#define PIRQ_NEEDS_EOI  (1 << 0)
#define PIRQ_SHAREABLE  (1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;
static int nr_pirqs;

static int *evtchn_to_irq;

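/*
 * Per-cpu bitmap of event-channel-to-cpu bindings: bit N of
 * cpu_evtchn_mask(cpu) is set iff event channel N is currently routed
 * to that cpu.  The upcall scan uses it to skip events destined for
 * other cpus.
 */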
struct cpu_evtchn_s {
        unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
        .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
        return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
        return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
        return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
                        .cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
        return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
                        .cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
        return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
                        .cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
                                    unsigned short gsi, unsigned short vector)
{
        return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
                        .cpu = 0,
                        .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
        return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
        return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
        return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);

        return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_VIRQ);

        return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
        return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
        return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        int irq = evtchn_to_irq[evtchn];
        unsigned ret = 0;

        if (irq != -1)
                ret = cpu_from_irq(irq);

        return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

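/*
 * An event channel is deliverable on a given cpu when it is pending,
 * bound to that cpu, and not globally masked: the three bitmaps are
 * ANDed together one word at a time.
 */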
static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask(cpu)[idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

        __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
        __set_bit(chn, cpu_evtchn_mask(cpu));

        irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
        struct irq_desc *desc;
        int i;

        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
                cpumask_copy(desc->affinity, cpumask_of(0));
        }
#endif

        memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static int get_nr_hw_irqs(void)
{
        int ret = 1;

#ifdef CONFIG_X86_IO_APIC
        ret = get_nr_irqs_gsi();
#endif

        return ret;
}

/* Callers of this function should make sure that PHYSDEVOP_get_nr_pirqs
 * succeeded, otherwise nr_pirqs won't hold the right value. */
static int find_unbound_pirq(void)
{
        int i;
        for (i = nr_pirqs-1; i >= 0; i--) {
                if (pirq_to_irq[i] < 0)
                        return i;
        }
        return -1;
}

static int find_unbound_irq(void)
{
        struct irq_data *data;
        int irq, res;
        int start = get_nr_hw_irqs();

        if (start == nr_irqs)
                goto no_irqs;

        /* nr_irqs is a magic value. Must not use it.*/
        for (irq = nr_irqs-1; irq > start; irq--) {
                data = irq_get_irq_data(irq);
                /* only 0->15 have init'd desc; handle irq > 16 */
                if (!data)
                        break;
                if (data->chip == &no_irq_chip)
                        break;
                if (data->chip != &xen_dynamic_chip)
                        continue;
                if (irq_info[irq].type == IRQT_UNBOUND)
                        return irq;
        }

        if (irq == start)
                goto no_irqs;

        res = irq_alloc_desc_at(irq, 0);

        if (WARN_ON(res != irq))
                return -1;

        return irq;

no_irqs:
        panic("No available IRQ to bind to: increase nr_irqs!\n");
}

static bool identity_mapped_irq(unsigned irq)
{
        /* identity map all the hardware irqs */
        return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
        struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

        if (unlikely(pirq_needs_eoi(irq))) {
                int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                WARN_ON(rc);
        }
}

static void pirq_query_unmask(int irq)
{
        struct physdev_irq_status_query irq_status;
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        irq_status.irq = pirq_from_irq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;

        info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->action == NULL;
}

static unsigned int startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);
        int rc;

        BUG_ON(info->type != IRQT_PIRQ);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = pirq_from_irq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
                                        BIND_PIRQ__WILL_SHARE : 0;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (rc != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq);

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        info->evtchn = evtchn;

out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq);

        return 0;
}

static void shutdown_pirq(unsigned int irq)
{
        struct evtchn_close close;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
        startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct irq_desc *desc = irq_to_desc(irq);

        if (WARN_ON(!desc))
                return;

        if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
            (IRQ_DISABLED|IRQ_PENDING)) {
                shutdown_pirq(irq);
        } else if (VALID_EVTCHN(evtchn)) {
                unmask_evtchn(evtchn);
                pirq_unmask_notify(irq);
        }
}

static int find_irq_by_gsi(unsigned gsi)
{
        int irq;

        for (irq = 0; irq < nr_irqs; irq++) {
                struct irq_info *info = info_for_irq(irq);

                if (info == NULL || info->type != IRQT_PIRQ)
                        continue;

                if (gsi_from_irq(irq) == gsi)
                        return irq;
        }

        return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
        return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/* xen_map_pirq_gsi might allocate irqs from the top down; as a
 * consequence, don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument: in that case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq is actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
        int irq = 0;
        struct physdev_irq irq_op;

        spin_lock(&irq_mapping_update_lock);

        if ((pirq > nr_pirqs) || (gsi > nr_irqs)) {
                printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
                       pirq > nr_pirqs ? "nr_pirqs" : "",
                       gsi > nr_irqs ? "nr_irqs" : "");
                goto out;
        }

        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
                       irq, gsi);
                goto out;       /* XXX need refcount? */
        }

        /* If we are a PV guest, we don't have GSIs (no ACPI passed).
         * Therefore we use !xen_initial_domain() to detect this case
         * and identity-map the irq. */
        if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
                                xen_pv_domain())) {
                irq = gsi;
                irq_alloc_desc_at(irq, 0);
        } else
                irq = find_unbound_irq();

        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                                      handle_level_irq, name);

        irq_op.irq = irq;
        irq_op.vector = 0;

        /* Only the privileged domain can do this. For non-priv, the pcifront
         * driver provides a PCI bus that does the call to do exactly
         * this in the priv domain. */
        if (xen_initial_domain() &&
            HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
                irq_free_desc(irq);
                irq = -ENOSPC;
                goto out;
        }

        irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
        irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
        pirq_to_irq[pirq] = irq;

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#include "../pci/msi.h"

void xen_allocate_pirq_msi(char *name, int *irq, int *pirq)
{
        spin_lock(&irq_mapping_update_lock);

        *irq = find_unbound_irq();
        if (*irq == -1)
                goto out;

        *pirq = find_unbound_pirq();
        if (*pirq == -1)
                goto out;

        set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
                                      handle_level_irq, name);

        irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
        pirq_to_irq[*pirq] = *irq;

out:
        spin_unlock(&irq_mapping_update_lock);
}

int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
{
        int irq = -1;
        struct physdev_map_pirq map_irq;
        int rc;
        int pos;
        u32 table_offset, bir;

        memset(&map_irq, 0, sizeof(map_irq));
        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_MSI;
        map_irq.index = -1;
        map_irq.pirq = -1;
        map_irq.bus = dev->bus->number;
        map_irq.devfn = dev->devfn;

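        /*
         * For MSI-X, Xen needs to know where the MSI-X table lives.
         * Per the PCI spec, the low bits of the table offset register
         * select the BAR (the "BIR") containing the table; the rest is
         * the offset into that BAR.
         */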
        if (type == PCI_CAP_ID_MSIX) {
                pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);

                pci_read_config_dword(dev, msix_table_offset_reg(pos),
                                      &table_offset);
                bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);

                map_irq.table_base = pci_resource_start(dev, bir);
                map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
        }

        spin_lock(&irq_mapping_update_lock);

        irq = find_unbound_irq();

        if (irq == -1)
                goto out;

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
        if (rc) {
                printk(KERN_WARNING "xen map irq failed %d\n", rc);

                irq_free_desc(irq);

                irq = -1;
                goto out;
        }
        irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);

        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                        handle_level_irq,
                        (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi");

out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}
#endif

int xen_destroy_irq(int irq)
{
        struct irq_desc *desc;
        struct physdev_unmap_pirq unmap_irq;
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;

        spin_lock(&irq_mapping_update_lock);

        desc = irq_to_desc(irq);
        if (!desc)
                goto out;

        if (xen_initial_domain()) {
                unmap_irq.pirq = info->u.pirq.gsi;
                unmap_irq.domid = DOMID_SELF;
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
                if (rc) {
                        printk(KERN_WARNING "unmap irq failed %d\n", rc);
                        goto out;
                }
        }
        irq_info[irq] = mk_unbound_info();

        irq_free_desc(irq);

out:
        spin_unlock(&irq_mapping_update_lock);
        return rc;
}

int xen_vector_from_irq(unsigned irq)
{
        return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
        return gsi_from_irq(irq);
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = find_unbound_irq();

                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_fasteoi_irq, "event");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;

                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                irq = find_unbound_irq();

                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
        }

        if (irq_info[irq].type != IRQT_UNBOUND) {
                irq_info[irq] = mk_unbound_info();

                irq_free_desc(irq);
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        irqflags |= IRQF_NO_SUSPEND;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
        struct vcpu_info *v;

        spin_lock_irqsave(&debug_lock, flags);

        printk("\nvcpu %d\n ", cpu);

        for_each_online_cpu(i) {
                int pending;
                v = per_cpu(xen_vcpu, i);
                pending = (get_irq_regs() && i == cpu)
                        ? xen_irqs_disabled(get_irq_regs())
                        : v->evtchn_upcall_mask;
                printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
                       pending, v->evtchn_upcall_pending,
                       (int)(sizeof(v->evtchn_pending_sel)*2),
                       v->evtchn_pending_sel);
        }
        v = per_cpu(xen_vcpu, cpu);

        printk("\npending:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
                       sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n " : " ");
        printk("\nglobal mask:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s",
                       (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nglobally unmasked:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nlocal cpu%d mask:\n ", cpu);
        for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
                       cpu_evtchn[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nlocally unmasked:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
                unsigned long pending = sh->evtchn_pending[i]
                        & ~sh->evtchn_mask[i]
                        & cpu_evtchn[i];
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       pending, i % 8 == 0 ? "\n " : " ");
        }

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        int word_idx = i / BITS_PER_LONG;
                        printk(" %d: event %d -> irq %d%s%s%s\n",
                               cpu_from_evtchn(i), i,
                               evtchn_to_irq[i],
                               sync_test_bit(word_idx, &v->evtchn_pending_sel)
                               ? "" : " l2-clear",
                               !sync_test_bit(i, sh->evtchn_mask)
                               ? "" : " globally-masked",
                               sync_test_bit(i, cpu_evtchn)
                               ? "" : " locally-masked");
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

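/*
 * Per-cpu upcall nesting depth: if an upcall interrupts one already in
 * progress on the same cpu, the nested invocation only bumps this count
 * and returns; the outer loop sees count != 1 and re-scans.
 */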
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
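/*
 * Concretely (assuming 64-bit longs): NR_EVENT_CHANNELS is 4096, so
 * evtchn_pending[] spans 64 words, and evtchn_pending_sel has bit w
 * set iff word w of evtchn_pending[] may contain a set bit.  Finding
 * a pending event is thus two __ffs() scans instead of a 4096-bit one.
 */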
static void __xen_evtchn_do_upcall(void)
{
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__get_cpu_var(xed_nesting_count)++)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
                while (pending_words != 0) {
                        unsigned long pending_bits;
                        int word_idx = __ffs(pending_words);
                        pending_words &= ~(1UL << word_idx);

                        while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
                                int bit_idx = __ffs(pending_bits);
                                int port = (word_idx * BITS_PER_LONG) + bit_idx;
                                int irq = evtchn_to_irq[port];
                                struct irq_desc *desc;

                                mask_evtchn(port);
                                clear_evtchn(port);

                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
                                                generic_handle_irq_desc(irq, desc);
                                }
                        }
                }

                BUG_ON(!irqs_disabled());

                count = __get_cpu_var(xed_nesting_count);
                __get_cpu_var(xed_nesting_count) = 0;
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

        put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        exit_idle();
        irq_enter();

        __xen_evtchn_do_upcall();

        irq_exit();
        set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
        __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        struct irq_info *info = info_for_irq(irq);

        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_evtchn_info(evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        /* events delivered via platform PCI interrupts are always
         * routed to vcpu 0 */
        if (!VALID_EVTCHN(evtchn) ||
                (xen_hvm_domain() && !xen_have_vector_callback))
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
        unsigned tcpu = cpumask_first(dest);

        return rebind_irq_to_cpu(irq, tcpu);
}

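/*
 * The two retrigger helpers below re-inject an event by marking it
 * pending again: the channel is first masked with a test-and-set, the
 * pending bit is set, and then, if the channel was not already masked,
 * unmask_evtchn() runs its "lost edge" resend logic and raises an
 * upcall for the self-set pending bit.
 */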
int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_masked_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = timeout;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
EXPORT_SYMBOL(xen_poll_irq_timeout);
/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

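/*
 * (The clear/set/poll helpers above are used, for example, by the Xen
 * paravirtualised spinlock code to sleep on, and be kicked via, a
 * per-cpu lock-waiter event channel.)
 */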
void xen_irq_resume(void)
{
        unsigned int cpu, irq, evtchn;
        struct irq_desc *desc;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < nr_irqs; irq++)
                irq_info[irq].evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }

        /*
         * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
         * are not handled by the IRQ core.
         */
        for_each_irq_desc(irq, desc) {
                if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
                        continue;
                if (desc->status & IRQ_DISABLED)
                        continue;

                evtchn = evtchn_from_irq(irq);
                if (evtchn == -1)
                        continue;

                unmask_evtchn(evtchn);
        }
}

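/*
 * The three irq_chip flavours below: xen-dyn for ordinary inter-domain
 * event channels, xen-pirq for hardware interrupts routed through Xen,
 * and xen-percpu for VIRQs and IPIs, which stay bound to a single vcpu.
 */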
static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name           = "xen-dyn",

        .disable        = disable_dynirq,
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,

        .eoi            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
        .name           = "xen-pirq",

        .startup        = startup_pirq,
        .shutdown       = shutdown_pirq,

        .enable         = enable_pirq,
        .unmask         = enable_pirq,

        .disable        = disable_pirq,
        .mask           = disable_pirq,

        .ack            = ack_pirq,
        .end            = end_pirq,

        .set_affinity   = set_affinity_irq,

        .retrigger      = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
        .name           = "xen-percpu",

        .disable        = disable_dynirq,
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,

        .ack            = ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;
        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;
        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        printk(KERN_ERR "Request for Xen HVM callback vector"
                                        " failed.\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                printk(KERN_INFO "Xen HVM callback vector for event delivery is "
                                "enabled\n");
                /* in the restore case the vector has already been allocated */
                if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
                        alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
        int i, rc;
        struct physdev_nr_pirqs op_nr_pirqs;

        cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
                                    GFP_KERNEL);
        irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs);
        if (rc < 0) {
                nr_pirqs = nr_irqs;
                if (rc != -ENOSYS)
                        printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc);
        } else {
                if (xen_pv_domain() && !xen_initial_domain())
                        nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs);
                else
                        nr_pirqs = op_nr_pirqs.nr_pirqs;
        }
        pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL);
        for (i = 0; i < nr_pirqs; i++)
                pirq_to_irq[i] = -1;

        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                GFP_KERNEL);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
                /* pci_xen_hvm_init must be called after native_init_IRQ so that
                 * __acpi_register_gsi can point at the right function */
                pci_xen_hvm_init();
        } else {
                irq_ctx_init(smp_processor_id());
                if (xen_initial_domain())
                        xen_setup_pirqs();
        }
}