1 /* sun4v_ivec.S: Sun4v interrupt vector handling.
3 * Copyright (C) 2006 <davem@davemloft.net>
 *
 * NOTE(review): this extract is lossy -- the numbers fused into each
 * line are original-file line numbers, and gaps in them mean elided
 * lines (labels, delay slots, #endif, retry).  Verify against the
 * complete file.
 */
6 #include <asm/cpudata.h>
7 #include <asm/intr_queue.h>
14 /* Head offset in %g2, tail offset in %g4.
15 * If they are the same, no work.
 *
 * CPU cross-call (mondo) dispatch.  NOTE(review): the entry label,
 * the cmp feeding the be,pn below, and the delay-slot instructions
 * are elided in this extract (numbering gaps).
 */
17 mov INTRQ_CPU_MONDO_HEAD, %g2
18 ldxa [%g2] ASI_QUEUE, %g2
19 mov INTRQ_CPU_MONDO_TAIL, %g4
20 ldxa [%g4] ASI_QUEUE, %g4
/* head == tail => nothing queued (compare is in an elided line). */
22 be,pn %xcc, sun4v_cpu_mondo_queue_empty
25 /* Get &trap_block[smp_processor_id()] into %g4. */
26 ldxa [%g0] ASI_SCRATCHPAD, %g4
/* The scratchpad points at this CPU's fault_info; back up to the
 * start of its trap_block entry. */
27 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
29 /* Get CPU mondo queue base phys address into %g7. */
30 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
32 /* Now get the cross-call arguments and handler PC, same
35 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
36 * high half is context arg to MMU flushes, into %g5
37 * 2nd 64-bit word: 64-bit arg, load into %g1
38 * 3rd 64-bit word: 64-bit arg, load into %g7
 */
40 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
43 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
46 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
/* Advance head past the 64-byte entry; the -0x8 -0x8 compensates
 * for two 8-byte bumps of %g2 presumably done between the word
 * loads above (those lines are elided here -- confirm). */
47 add %g2, 0x40 - 0x8 - 0x8, %g2
49 /* Update queue head pointer. */
/* QMASK wraps the head offset at the queue size (and-mask applied
 * in an elided line). */
50 lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
53 mov INTRQ_CPU_MONDO_HEAD, %g4
54 stxa %g2, [%g4] ASI_QUEUE
/* Fall-through target when the queue had no entries. */
60 sun4v_cpu_mondo_queue_empty:
/* Device interrupt (DEV mondo) dispatch: dequeue one entry, link
 * its ina_bucket onto this CPU's irq work list, raise PIL_DEVICE_IRQ.
 * NOTE(review): entry label and several delay-slot lines are elided
 * in this extract.
 */
64 /* Head offset in %g2, tail offset in %g4. */
65 mov INTRQ_DEVICE_MONDO_HEAD, %g2
66 ldxa [%g2] ASI_QUEUE, %g2
67 mov INTRQ_DEVICE_MONDO_TAIL, %g4
68 ldxa [%g4] ASI_QUEUE, %g4
/* head == tail => no pending device mondos. */
70 be,pn %xcc, sun4v_dev_mondo_queue_empty
73 /* Get &trap_block[smp_processor_id()] into %g4. */
74 ldxa [%g0] ASI_SCRATCHPAD, %g4
75 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
77 /* Get DEV mondo queue base phys address into %g5. */
78 ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
80 /* Load IVEC into %g3. */
81 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
85 /* Update queue head pointer, this frees up some registers. */
86 lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
89 mov INTRQ_DEVICE_MONDO_HEAD, %g4
90 stxa %g2, [%g4] ASI_QUEUE
/* %g1 = physical address of this CPU's irq_work list head. */
93 TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
95 /* For VIRQs, cookie is encoded as ~bucket_phys_addr */
99 /* Get __pa(&ivector_table[IVEC]) into %g4. */
100 sethi %hi(ivector_table_pa), %g4
101 ldx [%g4 + %lo(ivector_table_pa)], %g4
/* Store linking the bucket into the per-cpu work list; the list
 * manipulation around it is elided in this extract. */
106 stxa %g2, [%g4] ASI_PHYS_USE_EC
109 /* Signal the interrupt by setting (1 << pil) in %softint. */
110 wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
/* Fall-through target when the queue had no entries. */
112 sun4v_dev_mondo_queue_empty:
/* Resumable-error mondo dispatch: copy the 64-byte queue entry into
 * the per-cpu kernel buffer, then etrap and call C to log it.
 * NOTE(review): entry label, delay-slot instructions, #endif and
 * retry lines are elided in this extract (numbering gaps).
 */
116 /* Head offset in %g2, tail offset in %g4. */
117 mov INTRQ_RESUM_MONDO_HEAD, %g2
118 ldxa [%g2] ASI_QUEUE, %g2
119 mov INTRQ_RESUM_MONDO_TAIL, %g4
120 ldxa [%g4] ASI_QUEUE, %g4
/* head == tail => no pending resumable-error reports. */
122 be,pn %xcc, sun4v_res_mondo_queue_empty
125 /* Get &trap_block[smp_processor_id()] into %g3. */
126 ldxa [%g0] ASI_SCRATCHPAD, %g3
127 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
129 /* Get RES mondo queue base phys address into %g5. */
130 ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
132 /* Get RES kernel buffer base phys address into %g7. */
133 ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
135 /* If the first word is non-zero, queue is full. */
/* (A zero first word marks a free kbuf slot at this offset.) */
136 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
137 brnz,pn %g1, sun4v_res_mondo_queue_full
/* QMASK wraps the head offset at the queue size. */
140 lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
142 /* Remember this entry's offset in %g1. */
145 /* Copy 64-byte queue entry into kernel buffer. */
/* Eight ldxa/stxa pairs; the 8-byte bumps of %g2 between pairs are
 * in elided delay-slot lines. */
146 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
147 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
149 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
150 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
152 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
153 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
155 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
156 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
158 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
159 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
161 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
162 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
164 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
165 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
167 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
168 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
171 /* Update queue head pointer. */
174 mov INTRQ_RESUM_MONDO_HEAD, %g4
175 stxa %g2, [%g4] ASI_QUEUE
178 /* Disable interrupts and save register state so we can call
179 * C code. The etrap handling will leave %g4 in %l4 for us
 * when it's done.
 */
183 wrpr %g0, PIL_NORMAL_MAX, %pil
185 ba,pt %xcc, etrap_irq
187 #ifdef CONFIG_TRACE_IRQFLAGS
188 call trace_hardirqs_off
/* NOTE(review): the matching #endif is in an elided line. */
192 add %sp, PTREGS_OFF, %o0
/* Log the resumable error from C; kbuf offset was saved earlier. */
193 call sun4v_resum_error
196 /* Return from trap. */
197 ba,pt %xcc, rtrap_irq
/* retry for this label is in an elided line. */
200 sun4v_res_mondo_queue_empty:
203 sun4v_res_mondo_queue_full:
204 /* The queue is full, consolidate our damage by setting
205 * the head equal to the tail. We'll just trap again otherwise.
206 * Call C code to log the event.
 */
208 mov INTRQ_RESUM_MONDO_HEAD, %g2
209 stxa %g4, [%g2] ASI_QUEUE
213 wrpr %g0, PIL_NORMAL_MAX, %pil
214 ba,pt %xcc, etrap_irq
216 #ifdef CONFIG_TRACE_IRQFLAGS
217 call trace_hardirqs_off
/* NOTE(review): the matching #endif is in an elided line. */
220 call sun4v_resum_overflow
221 add %sp, PTREGS_OFF, %o0
223 ba,pt %xcc, rtrap_irq
/* Non-resumable-error mondo dispatch: same structure as the
 * resumable handler above, using the NONRESUM queue/kbuf.
 * NOTE(review): entry label, delay-slot instructions, #endif and
 * retry lines are elided in this extract (numbering gaps).
 */
227 /* Head offset in %g2, tail offset in %g4. */
228 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
229 ldxa [%g2] ASI_QUEUE, %g2
230 mov INTRQ_NONRESUM_MONDO_TAIL, %g4
231 ldxa [%g4] ASI_QUEUE, %g4
/* head == tail => no pending non-resumable-error reports. */
233 be,pn %xcc, sun4v_nonres_mondo_queue_empty
236 /* Get &trap_block[smp_processor_id()] into %g3. */
237 ldxa [%g0] ASI_SCRATCHPAD, %g3
238 sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
240 /* Get NONRESUM mondo queue base phys address into %g5.
 * (Comment previously said "RES" -- copy-paste; the load below is
 * from TRAP_PER_CPU_NONRESUM_MONDO_PA.) */
241 ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
243 /* Get NONRESUM kernel buffer base phys address into %g7.
 * (Comment previously said "RES" -- copy-paste.) */
244 ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
246 /* If the first word is non-zero, queue is full. */
/* (A zero first word marks a free kbuf slot at this offset.) */
247 ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
248 brnz,pn %g1, sun4v_nonres_mondo_queue_full
/* QMASK wraps the head offset at the queue size. */
251 lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
253 /* Remember this entry's offset in %g1. */
256 /* Copy 64-byte queue entry into kernel buffer. */
/* Eight ldxa/stxa pairs; the 8-byte bumps of %g2 between pairs are
 * in elided delay-slot lines. */
257 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
258 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
260 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
261 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
263 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
264 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
266 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
267 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
269 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
270 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
272 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
273 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
275 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
276 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
278 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
279 stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
282 /* Update queue head pointer. */
285 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
286 stxa %g2, [%g4] ASI_QUEUE
289 /* Disable interrupts and save register state so we can call
290 * C code. The etrap handling will leave %g4 in %l4 for us
 * when it's done.
 */
294 wrpr %g0, PIL_NORMAL_MAX, %pil
296 ba,pt %xcc, etrap_irq
298 #ifdef CONFIG_TRACE_IRQFLAGS
299 call trace_hardirqs_off
/* NOTE(review): the matching #endif is in an elided line. */
303 add %sp, PTREGS_OFF, %o0
/* Log the non-resumable error from C; kbuf offset was saved earlier. */
304 call sun4v_nonresum_error
307 /* Return from trap. */
308 ba,pt %xcc, rtrap_irq
/* retry for this label is in an elided line. */
311 sun4v_nonres_mondo_queue_empty:
314 sun4v_nonres_mondo_queue_full:
315 /* The queue is full, consolidate our damage by setting
316 * the head equal to the tail. We'll just trap again otherwise.
317 * Call C code to log the event.
 */
319 mov INTRQ_NONRESUM_MONDO_HEAD, %g2
320 stxa %g4, [%g2] ASI_QUEUE
324 wrpr %g0, PIL_NORMAL_MAX, %pil
325 ba,pt %xcc, etrap_irq
327 #ifdef CONFIG_TRACE_IRQFLAGS
328 call trace_hardirqs_off
/* NOTE(review): the matching #endif is in an elided line. */
331 call sun4v_nonresum_overflow
332 add %sp, PTREGS_OFF, %o0
334 ba,pt %xcc, rtrap_irq