/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
	unsigned short evtchn;
	unsigned char index;
	unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return (struct packed_irq) { evtchn, index, type };
}
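
/*
 * For example, mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 17) packs up an irq
 * bound to the per-cpu timer VIRQ, delivered on event channel 17.
 */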

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
	return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
	return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
	return irq_info[irq].type;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
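
/*
 * Worked example for idx == 0: with sh->evtchn_pending[0] == 0x6,
 * cpu_evtchn_mask(cpu)[0] == 0x2 and sh->evtchn_mask[0] == 0x0, only
 * bit 1 survives (0x6 & 0x2 & ~0x0 == 0x2): port 1 is pending, routed
 * to this cpu, and not masked.
 */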

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));

	__clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	/* Set every bit of CPU#0's mask, i.e. the whole bits[] array, not
	   just sizeof(unsigned long *) bytes. */
	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask_p[0].bits));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int find_unbound_irq(void)
{
	int irq;
	struct irq_desc *desc;

	/* Only allocate from dynirq range */
	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	desc = irq_to_desc_alloc_cpu(irq, 0);
	if (WARN_ON(desc == NULL))
		return -1;

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
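
/*
 * Typical use, sketched for illustration (the frontend names below are
 * hypothetical, not part of this file): a driver binds the event channel
 * it was granted, kicks the remote end, and unbinds on teardown.
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_frontend_interrupt,
 *					0, "my-frontend", dev);
 *	if (irq < 0)
 *		return irq;
 *	notify_remote_via_irq(irq);
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */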

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
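
/*
 * For instance, the Xen clockevent code binds the per-cpu timer VIRQ
 * roughly like this (handler name and flags illustrative):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_DISABLED|IRQF_PERCPU, "timer", NULL);
 */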

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long flags;
	int i;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}

	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i, evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];

				if (irq != -1)
					xen_do_IRQ(irq, regs);
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(nesting_count);
		__get_cpu_var(nesting_count) = 0;
	} while (count != 1);

out:
	put_cpu();
}
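
/*
 * Decoding example for the two-level search above: if bit 1 of
 * evtchn_pending_sel is set and active_evtchns(cpu, s, 1) returns 0x10,
 * then word_idx == 1 and bit_idx == 4, so the pending port is
 * 1 * BITS_PER_LONG + 4 (i.e. port 68 on a 64-bit build).
 */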

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so the bindcount should be non-0 */
	BUG_ON(irq_bindcount[irq] == 0);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
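
/*
 * Note the mask/set-pending/unmask sequence above: marking the channel
 * pending while it is masked, then unmasking, lets unmask_evtchn()'s
 * 'lost interrupt edge' logic re-deliver the event; the channel is only
 * unmasked here if this function was the one that masked it.
 */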

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}
)
664 int evtchn
= evtchn_from_irq(irq
);
666 move_native_irq(irq
);
668 if (VALID_EVTCHN(evtchn
))
669 clear_evtchn(evtchn
);

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
		BUG_ON(irq_info[irq].index != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq].type != IRQT_IPI);
		BUG_ON(irq_info[irq].index != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}
)
770 int evtchn
= evtchn_from_irq(irq
);
773 if (VALID_EVTCHN(evtchn
))
774 ret
= test_evtchn(evtchn
);

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
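
/*
 * The clear/set/test/poll helpers above let a caller busy-wait on an
 * event without taking an interrupt: clear the port, do the work, then
 * xen_poll_irq() blocks in the hypervisor until the port is re-set
 * (the Xen spinlock slowpath uses this pattern).
 */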

void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,
	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};
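
/*
 * Because the irqs are bound with handle_level_irq, the core masks and
 * acks the channel on entry and unmasks it after the handler runs, so
 * Xen events behave as level-triggered interrupts at the chip level.
 */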

void __init xen_init_IRQ(void)
{
	int i;
	size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);

	cpu_evtchn_mask_p = kmalloc(size, GFP_KERNEL);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < nr_irqs; i++)
		irq_bindcount[i] = 0;

	irq_ctx_init(smp_processor_id());
}