/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}
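
/* Illustrative sketch (not from the original source): these accessors
 * always take the bucket's *physical* address, so a typical lookup for
 * some INO would be
 *
 *	struct ino_bucket *bucket = &ivector_table[ino];
 *	unsigned int virt_irq = bucket_get_virt_irq(__pa(bucket));
 *
 * which is the pattern build_irq() and sun4v_build_common() use below.
 */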

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}
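
/* Usage sketch (illustrative): callers pass the identity of the
 * interrupt source; fixed sun4u-style interrupts use a device handle
 * of zero:
 *
 *	virt_irq = virt_irq_alloc(0, ino);             (build_irq)
 *	virt_irq = virt_irq_alloc(devhandle, devino);  (sun4v_build_virq)
 *
 * Slot 0 is never handed out, so a return value of 0 signals that the
 * table is full.
 */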

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
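
/* Worked example (illustrative): on a Safari-based (non-JBUS) Cheetah
 * system, cpuid 0x2f splits into agent ID a = 0x2f & 0x1f = 0x0f and
 * node ID n = (0x2f >> 5) & 0x1f = 0x01, which are then packed into
 * the IMAP_AID_SAFARI and IMAP_NID_SAFARI fields of the target ID.
 */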

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(virt_irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(virt_irq, affinity) \
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq,
				       irq_desc[virt_irq].affinity);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}

static int sun4u_set_affinity(unsigned int virt_irq,
			      const struct cpumask *mask)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq, mask);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(unsigned int virt_irq)
{
}

static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq,
					     irq_desc[virt_irq].affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(unsigned int virt_irq,
			      const struct cpumask *mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(unsigned int virt_irq,
				   const struct cpumask *mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, mask);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name		= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.name		= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.name		= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

static void pre_flow_handler(unsigned int virt_irq,
			     struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap  = imap;
	data->iclr  = iclr;

out:
	return virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}

static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}
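
/* Note on the stack arithmetic above (explanatory, assuming the usual
 * sparc64 conventions): the IRQ stack is THREAD_SIZE bytes, %sp is
 * kept STACK_BIAS bytes below the address it references, and 192
 * bytes are reserved for the topmost stack frame's register save
 * area, so sp + THREAD_SIZE - 192 - STACK_BIAS is the biased %sp
 * value of an empty hard IRQ stack.
 */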

void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}
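
/* Worked example (illustrative): a queue with qmask 0x1fff spans 8KB;
 * at 64 bytes per mondo entry this registers num_entries =
 * 0x2000 / 64 = 128 entries with the hypervisor.
 */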

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}
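
/* Worked example (illustrative, assuming the usual 8KB sparc64 page
 * size): the same qmask 0x1fff queue needs size = PAGE_ALIGN(0x2000)
 * = 8KB and order = 0, and the single page from __get_free_pages()
 * is naturally 8KB aligned, satisfying the rule above.
 */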

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}
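
/* Layout sketch (illustrative): the zeroed page is carved up as
 *
 *	page + 0  : 64-byte cpu mondo block  (cpu_mondo_block_pa)
 *	page + 64 : u16 cpu list             (cpu_list_pa)
 *
 * and the BUILD_BUG_ON above guarantees that NR_CPUS u16 entries fit
 * in the remainder of the page.
 */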

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries. */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}