drivers/xen/events.c
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
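/*
 * Illustrative lifecycle (a hypothetical frontend sketch, not code in
 * this file; "my_handler" and "my_dev" are made-up names): a driver
 * learns an event-channel port, binds it to an irq with a handler, and
 * later tears the binding down again:
 *
 *	int irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					    "my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */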
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EIO"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
		} pirq;
	} u;
};

static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
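/*
 * Worked example with illustrative word values: pending = 0b1010, this
 * cpu's binding mask = 0b1110, global mask = 0b1000; the result is
 * 0b0010: pending, bound to this cpu, and not masked.
 */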
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/* Set every bit of cpu0's binding bitmap, not just sizeof a pointer. */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
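/*
 * Typical use (a sketch in the style of the I/O ring protocol; "ring"
 * and "notify" are assumed frontend locals): only kick the backend when
 * the ring macros say a notification is actually needed:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */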
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int find_unbound_irq(void)
{
	int irq;
	struct irq_desc *desc;

	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_info[irq].type == IRQT_UNBOUND)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	desc = irq_to_desc_alloc_node(irq, 0);
	if (WARN_ON(desc == NULL))
		return -1;

	dynamic_irq_init(irq);

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}


static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
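/*
 * Return-value contract (sketch; "dev" and its "irq" field are made-up
 * caller state): on success the value is the Linux irq, on failure the
 * negative errno from request_irq(), and the binding is already undone
 * on the failure path, so a caller need only:
 *
 *	rc = bind_evtchn_to_irqhandler(evtchn, handler, 0, "dev", dev);
 *	if (rc < 0)
 *		return rc;
 *	dev->irq = rc;
 */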
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
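/*
 * Sketch in the spirit of the Xen clockevent setup (handler name and
 * flags are illustrative, cf. arch/x86/xen/time.c): each cpu binds its
 * own VIRQ_TIMER, so the resulting irq only ever fires on that cpu:
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
 *				      "timer", NULL);
 */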
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
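/*
 * Sketch of the smp bring-up usage (names in the style of
 * arch/x86/xen/smp.c; flags are illustrative): one IPI vector is bound
 * per cpu, and xen_send_IPI_one() below is what later raises it:
 *
 *	irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 *				     xen_reschedule_interrupt,
 *				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
 *				     "resched", NULL);
 */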
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nmasks:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nunmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
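/*
 * For example, with 64-bit words, pending port 97 shows up as bit 1 in
 * the selector (word_idx 1) and bit 33 within that word (bit_idx 33),
 * since 1 * BITS_PER_LONG + 33 == 97; the selector tells us word 1 is
 * worth scanning before we ever touch the full pending bitmap.
 */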
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	exit_idle();
	irq_enter();

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1);

out:
	irq_exit();
	set_irq_regs(old_regs);

	put_cpu();
}

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}
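/*
 * This is the hook behind the generic irq affinity interface, so e.g.
 * (illustrative shell, irq number assumed):
 *
 *	echo 2 > /proc/irq/17/smp_affinity
 *
 * ends up here and rebinds irq 17's event channel to the first cpu in
 * the new mask via EVTCHNOP_bind_vcpu.
 */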
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
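/*
 * Intended pattern (a sketch modelled on the Xen spinlock slow path):
 * the caller owns a disabled irq, clears its pending state, then blocks
 * in the hypervisor until the event arrives (SCHEDOP_poll may also
 * return spuriously, hence the re-test):
 *
 *	xen_clear_irq_pending(irq);
 *	do {
 *		xen_poll_irq(irq);
 *	} while (!xen_test_irq_pending(irq));
 */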
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	irq_ctx_init(smp_processor_id());
}