/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}
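/* Illustrative note (not in the original file): every consumer in this
 * file reaches a bucket through these helpers with a *physical* address,
 * e.g.
 *
 *	bucket = &ivector_table[ino];
 *	virt_irq = bucket_get_virt_irq(__pa(bucket));
 *
 * rather than dereferencing the bucket through a cacheable mapping,
 * since mixing bypass and non-bypass accesses is exactly what the
 * comment above warns against.
 */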
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}
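/* Example (illustrative sketch, not in the original file): callers such
 * as build_irq() below pass a dev_handle of 0 on sun4u, while
 * sun4v_build_virq() passes the real device handle:
 *
 *	unsigned int virt_irq = virt_irq_alloc(devhandle, devino);
 *	if (!virt_irq)
 *		return -ENOMEM;
 *
 * Entry 0 is never handed out, so a zero return doubles as the failure
 * indication when the table is full.
 */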
#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif
/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
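/* Example output (illustrative only, spacing approximate; actual lines
 * depend on the machine and on which drivers are bound):
 *
 *	  9:     143023          0     sun4u  eth0
 *	 12:          5          3     sun4v  su
 *
 * Columns are: virtual irq, one count per online cpu, the chip
 * typename, then the action name(s).
 */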
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
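/* Worked example (illustrative): on a Safari-based (non-JBUS) cheetah
 * system, cpuid 37 yields agent id a = 37 & 0x1f = 5 and node id
 * n = (37 >> 5) & 0x1f = 1, so the target id programmed into the IMAP
 * register is (5 << IMAP_AID_SHIFT) | (1 << IMAP_NID_SHIFT).
 */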
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}

		cpuid = irq_rover;
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif
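/* Usage note (illustrative): irq_choose_cpu() is what makes
 * "echo 4 > /proc/irq/10/smp_affinity" take effect here; the next
 * enable/set_affinity of virtual irq 10 re-reads irq_desc[10].affinity
 * and, assuming cpu 2 is online, retargets the interrupt there. With
 * the default CPU_MASK_ALL affinity, the rover above spreads
 * interrupts round-robin across the online cpus instead.
 */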
static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}
static void sun4u_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	sun4u_irq_enable(virt_irq);
}
static void sun4u_irq_disable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long imap = data->imap;
		unsigned long tmp = upa_readq(imap);

		tmp &= ~IMAP_VALID;
		upa_writeq(tmp, imap);
	}
}
static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}
static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}
static void sun4v_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
}
static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}
static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}
static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}
static void sun4v_virt_set_affinity(unsigned int virt_irq,
				    const struct cpumask *mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
}
static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}
static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}
static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};
static void pre_flow_handler(unsigned int virt_irq,
			     struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}
void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}
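/* Example (sketch only; the names below are hypothetical): a bus driver
 * whose hardware needs per-interrupt setup before the generic flow
 * handler runs would install it like this:
 *
 *	static void mybus_pre_handler(unsigned int ino, void *arg1,
 *				      void *arg2)
 *	{
 *		... unmask or re-arm the source identified by ino ...
 *	}
 *
 *	irq_install_pre_handler(virt_irq, mybus_pre_handler,
 *				cookie1, cookie2);
 *
 * After this, pre_flow_handler() invokes mybus_pre_handler() and then
 * falls through to handle_fasteoi_irq().
 */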
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap = imap;
	data->iclr = iclr;

out:
	return virt_irq;
}
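/* Example usage (illustrative; the register addresses and names are
 * hypothetical): a sun4u device driver typically locates its IMAP/ICLR
 * register pair on the bus and then requests the returned virtual irq
 * as usual:
 *
 *	unsigned int virt_irq = build_irq(0, iclr_reg, imap_reg);
 *	err = request_irq(virt_irq, mydev_interrupt, 0, "mydev", dev);
 */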
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}
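/* Example (illustrative): a sun4v bus controller driver holding a
 * device handle and device INO from the machine description would
 * simply do
 *
 *	unsigned int virt_irq = sun4v_build_irq(devhandle, devino);
 *
 * and then hand virt_irq to request_irq().
 */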
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;
	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}
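/* Usage note (illustrative sketch; ldc_rx and lp are hypothetical
 * names): because of IRQ_NOAUTOEN above, an LDC user must enable the
 * interrupt explicitly once its own state is ready:
 *
 *	virt_irq = sun4v_build_virq(devhandle, devino);
 *	err = request_irq(virt_irq, ldc_rx, 0, "LDC RX", lp);
 *	... finish channel setup ...
 *	enable_irq(virt_irq);
 */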
void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}
static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}
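/* Stack layout note (illustrative, not in the original file):
 * hardirq_stack[cpu] points at the base of a THREAD_SIZE irq stack,
 * and the switch above positions %sp at
 *
 *	sp + THREAD_SIZE - 192 - STACK_BIAS
 *
 * i.e. the top of that region minus the 192-byte register save area
 * required by the sparc64 ABI, with the usual STACK_BIAS applied as
 * for any 64-bit sparc stack pointer.
 */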
void handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}
static void unhandled_perf_irq(struct pt_regs *regs)
{
	unsigned long pcr, pic;

	read_pcr(pcr);
	read_pic(pic);

	write_pcr(0);

	printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
	       smp_processor_id());
	printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
	       smp_processor_id(), pcr, pic);
}
/* Almost a direct copy of the powerpc PMC code.  */
static DEFINE_SPINLOCK(perf_irq_lock);
static void *perf_irq_owner_caller; /* mostly for debugging */
static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;
/* Invoked from level 15 PIL handler in trap table.  */
void perfctr_irq(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	perf_irq(regs);
}
int register_perfctr_intr(void (*handler)(struct pt_regs *))
{
	int ret;

	if (!handler)
		return -EINVAL;

	spin_lock(&perf_irq_lock);
	if (perf_irq != unhandled_perf_irq) {
		printk(KERN_WARNING "register_perfctr_intr: "
		       "perf IRQ busy (reserved by caller %p)\n",
		       perf_irq_owner_caller);
		ret = -EBUSY;
		goto out;
	}

	perf_irq_owner_caller = __builtin_return_address(0);
	perf_irq = handler;

	ret = 0;
out:
	spin_unlock(&perf_irq_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_perfctr_intr);
void release_perfctr_intr(void (*handler)(struct pt_regs *))
{
	spin_lock(&perf_irq_lock);
	perf_irq_owner_caller = NULL;
	perf_irq = unhandled_perf_irq;
	spin_unlock(&perf_irq_lock);
}
EXPORT_SYMBOL_GPL(release_perfctr_intr);
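/* Example (sketch only; my_pmc_handler is a hypothetical name): an
 * oprofile-style client reserves the level 15 perf counter interrupt
 * like this:
 *
 *	static void my_pmc_handler(struct pt_regs *regs)
 *	{
 *		... read and restart the counters ...
 *	}
 *
 *	err = register_perfctr_intr(my_pmc_handler);
 *	...
 *	release_perfctr_intr(my_pmc_handler);
 *
 * Only one owner is allowed at a time; a second register call fails
 * with -EBUSY and logs the current owner's caller address.
 */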
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					&irq_desc[irq].affinity);
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must have mapped it. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}
/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}
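/* Arithmetic note (illustrative): each mondo queue entry is 64 bytes,
 * and qmask is the queue size in bytes minus one. A cpu mondo qmask of
 * 0x1fff therefore describes an 8192-byte queue, and the hypervisor is
 * told (0x1fff + 1) / 64 = 128 entries.
 */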
void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}
static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}
static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	void *p = __alloc_bootmem(size, size, 0);

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = alloc_bootmem_pages(PAGE_SIZE);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}
/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
			       tb->nonresum_qmask);
	}
}
static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}
static struct irqaction timer_irq_action = {
	.name = "timer",
};
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = alloc_bootmem(size);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}