usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
25 * Assembly code support for the Cheetah+ module
28 #pragma ident "%Z%%M% %I% %E% SMI"
30 #if !defined(lint)
31 #include "assym.h"
32 #endif /* lint */
34 #include <sys/asm_linkage.h>
35 #include <sys/mmu.h>
36 #include <vm/hat_sfmmu.h>
37 #include <sys/machparam.h>
38 #include <sys/machcpuvar.h>
39 #include <sys/machthread.h>
40 #include <sys/machtrap.h>
41 #include <sys/privregs.h>
42 #include <sys/asm_linkage.h>
43 #include <sys/trap.h>
44 #include <sys/cheetahregs.h>
45 #include <sys/us3_module.h>
46 #include <sys/xc_impl.h>
47 #include <sys/intreg.h>
48 #include <sys/async.h>
49 #include <sys/clock.h>
50 #include <sys/cheetahasm.h>
51 #include <sys/cmpregs.h>
53 #ifdef TRAPTRACE
54 #include <sys/traptrace.h>
55 #endif /* TRAPTRACE */
58 #if !defined(lint)
60 /* BEGIN CSTYLED */
63 * Cheetah+ version to reflush an Ecache line by index.
65 * By default we assume the Ecache is 2-way so we flush both
66 * ways. Even if the cache is direct-mapped no harm will come
67 * from performing the flush twice, apart from perhaps a performance
68 * penalty.
70 * XXX - scr2 not used.
72 #define ECACHE_REFLUSH_LINE(ec_set_size, index, scr2) \
73 ldxa [index]ASI_EC_DIAG, %g0; \
74 ldxa [index + ec_set_size]ASI_EC_DIAG, %g0;
77 * Cheetah+ version of ecache_flush_line. Uses Cheetah+ Ecache Displacement
78 * Flush feature.
80 #define ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2) \
81 sub ec_set_size, 1, scr1; \
82 and physaddr, scr1, scr1; \
83 set CHP_ECACHE_IDX_DISP_FLUSH, scr2; \
84 or scr2, scr1, scr1; \
85 ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)
87 /* END CSTYLED */
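/*
 * Roughly, in C terms, the two Cheetah+ macros above compute a
 * displacement-flush index and touch it in both ways.  This is only an
 * illustrative sketch; chp_ec_diag_load() is a stand-in for the ldxa from
 * ASI_EC_DIAG, not a real kernel routine:
 *
 *	void
 *	chp_ecache_flush_line_c(uint64_t physaddr, uint64_t ec_set_size)
 *	{
 *		// line index within one way, plus the displacement-flush bit
 *		uint64_t idx = (physaddr & (ec_set_size - 1)) |
 *		    CHP_ECACHE_IDX_DISP_FLUSH;
 *
 *		chp_ec_diag_load(idx);			// way 0
 *		chp_ec_diag_load(idx + ec_set_size);	// way 1
 *	}
 */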
90 * Panther version to reflush a line from both the L2 cache and L3
91 * cache by the respective indexes. Flushes all ways of the line from
92 * each cache.
94 * l2_index Index into the L2$ of the line to be flushed. This
95 * register will not be modified by this routine.
96 * l3_index Index into the L3$ of the line to be flushed. This
97 * register will not be modified by this routine.
98 * scr2 scratch register.
99 * scr3 scratch register.
102 #define PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3) \
103 set PN_L2_MAX_SET, scr2; \
104 set PN_L2_SET_SIZE, scr3; \
105 1: \
106 ldxa [l2_index + scr2]ASI_L2_TAG, %g0; \
107 cmp scr2, %g0; \
108 bg,a 1b; \
109 sub scr2, scr3, scr2; \
110 mov 6, scr2; \
111 7: \
112 cmp scr2, %g0; \
113 bg,a 7b; \
114 sub scr2, 1, scr2; \
115 set PN_L3_MAX_SET, scr2; \
116 set PN_L3_SET_SIZE, scr3; \
117 2: \
118 ldxa [l3_index + scr2]ASI_EC_DIAG, %g0; \
119 cmp scr2, %g0; \
120 bg,a 2b; \
121 sub scr2, scr3, scr2;
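/*
 * An illustrative C rendering of PN_ECACHE_REFLUSH_LINE; pn_l2_tag_load()
 * and pn_l3_diag_load() are stand-ins for the ldxa accesses to ASI_L2_TAG
 * and ASI_EC_DIAG, not real kernel routines:
 *
 *	void
 *	pn_ecache_reflush_line_c(uint64_t l2_index, uint64_t l3_index)
 *	{
 *		int64_t off;
 *
 *		// touch every way of the L2 line, highest way first
 *		for (off = PN_L2_MAX_SET; off >= 0; off -= PN_L2_SET_SIZE)
 *			pn_l2_tag_load(l2_index + off);
 *
 *		// brief spin (the "mov 6 / 7:" loop) before touching the L3
 *		for (off = 6; off > 0; off--)
 *			;
 *
 *		// then touch every way of the L3 line
 *		for (off = PN_L3_MAX_SET; off >= 0; off -= PN_L3_SET_SIZE)
 *			pn_l3_diag_load(l3_index + off);
 *	}
 */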
124 * Panther version of ecache_flush_line. Flushes the line corresponding
125 * to physaddr from both the L2 cache and the L3 cache.
127 * physaddr Input: Physical address to flush.
128 * Output: Physical address to flush (preserved).
129 * l2_idx_out Input: scratch register.
130 * Output: Index into the L2$ of the line to be flushed.
131 * l3_idx_out Input: scratch register.
132 * Output: Index into the L3$ of the line to be flushed.
133 * scr3 scratch register.
134 * scr4 scratch register.
137 #define PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4) \
138 set PN_L3_SET_SIZE, l2_idx_out; \
139 sub l2_idx_out, 1, l2_idx_out; \
140 and physaddr, l2_idx_out, l3_idx_out; \
141 set PN_L3_IDX_DISP_FLUSH, l2_idx_out; \
142 or l2_idx_out, l3_idx_out, l3_idx_out; \
143 set PN_L2_SET_SIZE, l2_idx_out; \
144 sub l2_idx_out, 1, l2_idx_out; \
145 and physaddr, l2_idx_out, l2_idx_out; \
146 set PN_L2_IDX_DISP_FLUSH, scr3; \
147 or l2_idx_out, scr3, l2_idx_out; \
148 PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)
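/*
 * In C terms PN_ECACHE_FLUSH_LINE boils down to the following (sketch only,
 * using the hypothetical pn_ecache_reflush_line_c() from the comment above):
 *
 *	l3_idx = (physaddr & (PN_L3_SET_SIZE - 1)) | PN_L3_IDX_DISP_FLUSH;
 *	l2_idx = (physaddr & (PN_L2_SET_SIZE - 1)) | PN_L2_IDX_DISP_FLUSH;
 *	pn_ecache_reflush_line_c(l2_idx, l3_idx);
 */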
150 #endif /* !lint */
153 * Fast ECC error at TL>0 handler
154 * We get here via trap 70 at TL>0->Software trap 0 at TL>0. We enter
155 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
156 * For a complete description of the Fast ECC at TL>0 handling see the
157 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
158 * us3_common_asm.s
160 #if defined(lint)
162 void
163 fast_ecc_tl1_err(void)
166 #else /* lint */
168 .section ".text"
169 .align 64
170 ENTRY_NP(fast_ecc_tl1_err)
173 * This macro turns off the D$/I$ if they are on and saves their
174 * original state in ch_err_tl1_tmp, saves all the %g registers in the
175 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
176 * the %tpc in ch_err_tl1_tpc. At the end of this macro, %g1 will
177 * point to the ch_err_tl1_data structure and the original D$/I$ state
178 * will be saved in ch_err_tl1_tmp. All %g registers except for %g1
179 * will be available.
181 CH_ERR_TL1_FECC_ENTER;
184 * Get the diagnostic logout data. %g4 must be initialized to
185 * current CEEN state, %g5 must point to logout structure in
186 * ch_err_tl1_data_t. %g3 will contain the nesting count upon
187 * return.
189 ldxa [%g0]ASI_ESTATE_ERR, %g4
190 and %g4, EN_REG_CEEN, %g4
191 add %g1, CH_ERR_TL1_LOGOUT, %g5
192 DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)
195 * If the logout nesting count is exceeded, we're probably
196 * not making any progress; try to panic instead.
198 cmp %g3, CLO_NESTING_MAX
199 bge fecc_tl1_err
203 * Save the current CEEN and NCEEN state in %g7 and turn them off
204 * before flushing the Ecache.
206 ldxa [%g0]ASI_ESTATE_ERR, %g7
207 andn %g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
208 stxa %g5, [%g0]ASI_ESTATE_ERR
209 membar #Sync
212 * Flush the Ecache, using the largest possible cache size with the
213 * smallest possible line size since we can't get the actual sizes
214 * from the cpu_node due to DTLB misses.
216 PN_L2_FLUSHALL(%g3, %g4, %g5)
218 set CH_ECACHE_MAX_SIZE, %g4
219 set CH_ECACHE_MIN_LSIZE, %g5
221 GET_CPU_IMPL(%g6)
222 cmp %g6, PANTHER_IMPL
223 bne %xcc, 2f
225 set PN_L3_SIZE, %g4
227 mov %g6, %g3
228 CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)
231 * Restore CEEN and NCEEN to the previous state.
233 stxa %g7, [%g0]ASI_ESTATE_ERR
234 membar #Sync
237 * If we turned off the D$, then flush it and turn it back on.
239 ldxa [%g1 + CH_ERR_TL1_TMP]%asi, %g3
240 andcc %g3, CH_ERR_TSTATE_DC_ON, %g0
241 bz %xcc, 3f
245 * Flush the D$.
247 ASM_LD(%g4, dcache_size)
248 ASM_LD(%g5, dcache_linesize)
249 CH_DCACHE_FLUSHALL(%g4, %g5, %g6)
252 * Turn the D$ back on.
254 ldxa [%g0]ASI_DCU, %g3
255 or %g3, DCU_DC, %g3
256 stxa %g3, [%g0]ASI_DCU
257 membar #Sync
260 * If we turned off the I$, then flush it and turn it back on.
262 ldxa [%g1 + CH_ERR_TL1_TMP]%asi, %g3
263 andcc %g3, CH_ERR_TSTATE_IC_ON, %g0
264 bz %xcc, 4f
268 * Flush the I$. Panther has different I$ parameters, and we
269 * can't access the logout I$ params without possibly generating
270 * a MMU miss.
272 GET_CPU_IMPL(%g6)
273 set PN_ICACHE_SIZE, %g3
274 set CH_ICACHE_SIZE, %g4
275 mov CH_ICACHE_LSIZE, %g5
276 cmp %g6, PANTHER_IMPL
277 movz %xcc, %g3, %g4
278 movz %xcc, PN_ICACHE_LSIZE, %g5
279 CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)
282 * Turn the I$ back on. Changing DCU_IC requires flush.
284 ldxa [%g0]ASI_DCU, %g3
285 or %g3, DCU_IC, %g3
286 stxa %g3, [%g0]ASI_DCU
287 flush %g0
290 #ifdef TRAPTRACE
292 * Get current trap trace entry physical pointer.
294 CPU_INDEX(%g6, %g5)
295 sll %g6, TRAPTR_SIZE_SHIFT, %g6
296 set trap_trace_ctl, %g5
297 add %g6, %g5, %g6
298 ld [%g6 + TRAPTR_LIMIT], %g5
299 tst %g5
300 be %icc, skip_traptrace
302 ldx [%g6 + TRAPTR_PBASE], %g5
303 ld [%g6 + TRAPTR_OFFSET], %g4
304 add %g5, %g4, %g5
307 * Create trap trace entry.
309 rd %asi, %g7
310 wr %g0, TRAPTR_ASI, %asi
311 rd STICK, %g4
312 stxa %g4, [%g5 + TRAP_ENT_TICK]%asi
313 rdpr %tl, %g4
314 stha %g4, [%g5 + TRAP_ENT_TL]%asi
315 rdpr %tt, %g4
316 stha %g4, [%g5 + TRAP_ENT_TT]%asi
317 rdpr %tpc, %g4
318 stna %g4, [%g5 + TRAP_ENT_TPC]%asi
319 rdpr %tstate, %g4
320 stxa %g4, [%g5 + TRAP_ENT_TSTATE]%asi
321 stna %sp, [%g5 + TRAP_ENT_SP]%asi
322 stna %g0, [%g5 + TRAP_ENT_TR]%asi
323 wr %g0, %g7, %asi
324 ldxa [%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
325 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
326 wr %g0, TRAPTR_ASI, %asi
327 stna %g3, [%g5 + TRAP_ENT_F1]%asi
328 stna %g4, [%g5 + TRAP_ENT_F2]%asi
329 wr %g0, %g7, %asi
330 ldxa [%g1 + CH_ERR_TL1_AFAR]%asi, %g3
331 ldxa [%g1 + CH_ERR_TL1_AFSR]%asi, %g4
332 wr %g0, TRAPTR_ASI, %asi
333 stna %g3, [%g5 + TRAP_ENT_F3]%asi
334 stna %g4, [%g5 + TRAP_ENT_F4]%asi
335 wr %g0, %g7, %asi
338 * Advance trap trace pointer.
340 ld [%g6 + TRAPTR_OFFSET], %g5
341 ld [%g6 + TRAPTR_LIMIT], %g4
342 st %g5, [%g6 + TRAPTR_LAST_OFFSET]
343 add %g5, TRAP_ENT_SIZE, %g5
344 sub %g4, TRAP_ENT_SIZE, %g4
345 cmp %g5, %g4
346 movge %icc, 0, %g5
347 st %g5, [%g6 + TRAPTR_OFFSET]
348 skip_traptrace:
349 #endif /* TRAPTRACE */
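/*
 * The trap trace pointer advance above is, in C terms, a ring-buffer step.
 * Sketch only; the field names are illustrative, standing for the
 * TRAPTR_LAST_OFFSET/TRAPTR_OFFSET/TRAPTR_LIMIT slots of trap_trace_ctl:
 *
 *	ctl->last_offset = ctl->offset;
 *	ctl->offset += TRAP_ENT_SIZE;
 *	if (ctl->offset >= ctl->limit - TRAP_ENT_SIZE)
 *		ctl->offset = 0;		// wrap to the start
 */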
352 * If nesting count is not zero, skip all the AFSR/AFAR
353 * handling and just do the necessary cache-flushing.
355 ldxa [%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
356 brnz %g2, 6f
360 * If a UCU or L3_UCU followed by a WDU has occurred go ahead
361 * and panic since a UE will occur (on the retry) before the
362 * UCU and WDU messages are enqueued. On a Panther processor,
363 * we need to also see an L3_WDU before panicking. Note that
364 * we avoid accessing the _EXT ASIs if not on a Panther.
366 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
367 set 1, %g4
368 sllx %g4, C_AFSR_UCU_SHIFT, %g4
369 btst %g4, %g3 ! UCU in original shadow AFSR?
370 bnz %xcc, 5f
372 GET_CPU_IMPL(%g6)
373 cmp %g6, PANTHER_IMPL
374 bne %xcc, 6f ! not Panther, no UCU, skip the rest
376 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
377 btst C_AFSR_L3_UCU, %g3 ! L3_UCU in original shadow AFSR_EXT?
378 bz %xcc, 6f ! neither UCU nor L3_UCU was seen
381 ldxa [%g1 + CH_ERR_TL1_AFSR]%asi, %g4 ! original AFSR
382 ldxa [%g0]ASI_AFSR, %g3 ! current AFSR
383 or %g3, %g4, %g3 ! %g3 = original + current AFSR
384 set 1, %g4
385 sllx %g4, C_AFSR_WDU_SHIFT, %g4
386 btst %g4, %g3 ! WDU in original or current AFSR?
387 bz %xcc, 6f ! no WDU, skip remaining tests
389 GET_CPU_IMPL(%g6)
390 cmp %g6, PANTHER_IMPL
391 bne %xcc, fecc_tl1_err ! if not Panther, panic (saw UCU, WDU)
393 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g4 ! original AFSR_EXT
394 set ASI_AFSR_EXT_VA, %g6 ! ASI of current AFSR_EXT
395 ldxa [%g6]ASI_AFSR, %g3 ! value of current AFSR_EXT
396 or %g3, %g4, %g3 ! %g3 = original + current AFSR_EXT
397 btst C_AFSR_L3_WDU, %g3 ! L3_WDU in original or current AFSR?
398 bnz %xcc, fecc_tl1_err ! panic (saw L3_WDU and UCU or L3_UCU)
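/*
 * The decision made above can be written as a C predicate.  Sketch only;
 * the *_seen flags stand for the AFSR/AFSR_EXT bit tests performed above:
 *
 *	int ucu = ucu_seen || (is_panther && l3_ucu_seen);
 *
 *	if (ucu && wdu_seen && (!is_panther || l3_wdu_seen))
 *		panic();	// a UE would hit on the retry before the
 *				// UCU/WDU messages could be enqueued
 */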
402 * We fall into this macro if we've successfully logged the error in
403 * the ch_err_tl1_data structure and want the PIL15 softint to pick
404 * it up and log it. %g1 must point to the ch_err_tl1_data structure.
405 * Restores the %g registers and issues retry.
407 CH_ERR_TL1_EXIT;
410 * Establish panic exit label.
412 CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);
414 SET_SIZE(fast_ecc_tl1_err)
416 #endif /* lint */
419 #if defined(lint)
421 * scrubphys - Pass in the aligned physical memory address
422 * that you want to scrub, along with the ecache set size.
424 * 1) Displacement flush the E$ line corresponding to %addr.
425 * The first ldxa guarantees that the %addr is no longer in
426 * M, O, or E (goes to I, or S if an instruction fetch also happens).
427 * 2) "Write" the data using a CAS %addr,%g0,%g0.
428 * The casxa guarantees a transition from I to M or S to M.
429 * 3) Displacement flush the E$ line corresponding to %addr.
430 * The second ldxa pushes the M line out of the ecache, into the
431 * writeback buffers, on the way to memory.
432 * 4) The "membar #Sync" pushes the cache line out of the writeback
433 * buffers onto the bus, finally on the way to DRAM.
435 * This is a modified version of the algorithm suggested by Gary Lauterbach.
436 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
437 * as modified, but then we found out that for spitfire, if it misses in the
438 * E$ it will probably install as an M, but if it hits in the E$, then it
439 * will stay E, if the store doesn't happen. So the first displacement flush
440 * should ensure that the CAS will miss in the E$. Arrgh.
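/*
 * A condensed C-style sketch of the sequence scrubphys() performs below.
 * Illustrative only; the helpers named here stand in for the assembly
 * macros and instructions actually used:
 *
 *	void
 *	scrubphys_c(uint64_t paddr, int ec_set_size)
 *	{
 *		disable_interrupts();			// clear PSTATE.IE/AM
 *		ecache_displacement_flush(paddr);	// 1) line -> I or S
 *		atomic_cas_64((uint64_t *)paddr, 0, 0);	// 2) I/S -> M
 *		ecache_displacement_flush(paddr);	// 3) push the M line
 *		restore_interrupts();
 *		membar_sync();				// 4) drain writeback
 *	}
 */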
442 /* ARGSUSED */
443 void
444 scrubphys(uint64_t paddr, int ecache_set_size)
447 #else /* lint */
448 ENTRY(scrubphys)
449 rdpr %pstate, %o4
450 andn %o4, PSTATE_IE | PSTATE_AM, %o5
451 wrpr %o5, %g0, %pstate ! clear IE, AM bits
453 GET_CPU_IMPL(%o5) ! Panther Ecache is flushed differently
454 cmp %o5, PANTHER_IMPL
455 bne scrubphys_1
457 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
458 casxa [%o0]ASI_MEM, %g0, %g0
459 PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
460 b scrubphys_2
462 scrubphys_1:
463 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
464 casxa [%o0]ASI_MEM, %g0, %g0
465 ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
466 scrubphys_2:
467 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
469 retl
470 membar #Sync ! move the data out of the load buffer
471 SET_SIZE(scrubphys)
473 #endif /* lint */
476 #if defined(lint)
478 * clearphys - Pass in the physical memory address of the checkblock
479 * that you want to push out, cleared with a recognizable pattern,
480 * from the ecache.
482 * To ensure that the ecc gets recalculated after the bad data is cleared,
483 * we must write out enough data to fill the w$ line (64 bytes). So we read
484 * in an entire ecache subblock's worth of data, and write it back out.
485 * Then we overwrite the 16 bytes of bad data with the pattern.
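/*
 * A C-style sketch of what clearphys() does below.  Illustrative only; the
 * helpers named here (phys_load/phys_store and friends) stand in for the
 * ASI_MEM accesses and assembly sequences actually used:
 *
 *	void
 *	clearphys_c(uint64_t paddr, int ec_set_size, int ec_linesize)
 *	{
 *		uint64_t base = paddr & ~(uint64_t)(CH_ECACHE_SUBBLK_SIZE - 1);
 *		int i;
 *
 *		disable_interrupts();
 *		disable_nceen();	// no error traps while we work
 *
 *		// read and rewrite the 64-byte subblock to fill the W$ line
 *		for (i = CH_ECACHE_SUBBLK_SIZE - 8; i >= 0; i -= 8)
 *			phys_store(base + i, phys_load(base + i));
 *
 *		// overwrite the 16 bytes of bad data with the pattern
 *		phys_store(paddr, 0xbadecc00badecc01ULL);
 *		phys_store(paddr + 8, 0xbadecc00badecc01ULL);
 *
 *		// flush / cas / re-flush, as in scrubphys(), then clean up
 *		ecache_displacement_flush(paddr);
 *		clear_afsr();
 *		restore_nceen();
 *		restore_interrupts();
 *	}
 */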
487 /* ARGSUSED */
488 void
489 clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
493 #else /* lint */
494 ENTRY(clearphys)
495 /* turn off IE, AM bits */
496 rdpr %pstate, %o4
497 andn %o4, PSTATE_IE | PSTATE_AM, %o5
498 wrpr %o5, %g0, %pstate
500 /* turn off NCEEN */
501 ldxa [%g0]ASI_ESTATE_ERR, %o5
502 andn %o5, EN_REG_NCEEN, %o3
503 stxa %o3, [%g0]ASI_ESTATE_ERR
504 membar #Sync
506 /* align address passed with 64 bytes subblock size */
507 mov CH_ECACHE_SUBBLK_SIZE, %o2
508 andn %o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1
510 /* move the good data into the W$ */
511 clearphys_1:
512 subcc %o2, 8, %o2
513 ldxa [%g1 + %o2]ASI_MEM, %g2
514 bge clearphys_1
515 stxa %g2, [%g1 + %o2]ASI_MEM
517 /* now overwrite the bad data */
518 setx 0xbadecc00badecc01, %g1, %g2
519 stxa %g2, [%o0]ASI_MEM
520 mov 8, %g1
521 stxa %g2, [%o0 + %g1]ASI_MEM
523 GET_CPU_IMPL(%o3) ! Panther Ecache is flushed differently
524 cmp %o3, PANTHER_IMPL
525 bne clearphys_2
527 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
528 casxa [%o0]ASI_MEM, %g0, %g0
529 PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
530 b clearphys_3
532 clearphys_2:
533 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
534 casxa [%o0]ASI_MEM, %g0, %g0
535 ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
536 clearphys_3:
537 /* clear the AFSR */
538 ldxa [%g0]ASI_AFSR, %o1
539 stxa %o1, [%g0]ASI_AFSR
540 membar #Sync
542 /* turn NCEEN back on */
543 stxa %o5, [%g0]ASI_ESTATE_ERR
544 membar #Sync
546 /* return and re-enable IE and AM */
547 retl
548 wrpr %g0, %o4, %pstate
549 SET_SIZE(clearphys)
551 #endif /* lint */
554 #if defined(lint)
556 * Cheetah+ Ecache displacement flush the specified line from the E$
558 * For Panther, this means flushing the specified line from both the
559 * L2 cache and L3 cache.
561 * Register usage:
562 * %o0 - 64 bit physical address for flushing
563 * %o1 - Ecache set size
565 /*ARGSUSED*/
566 void
567 ecache_flush_line(uint64_t flushaddr, int ec_set_size)
570 #else /* lint */
571 ENTRY(ecache_flush_line)
573 GET_CPU_IMPL(%o3) ! Panther Ecache is flushed differently
574 cmp %o3, PANTHER_IMPL
575 bne ecache_flush_line_1
578 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
579 b ecache_flush_line_2
581 ecache_flush_line_1:
582 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
583 ecache_flush_line_2:
584 retl
586 SET_SIZE(ecache_flush_line)
587 #endif /* lint */
589 #if defined(lint)
590 void
591 set_afsr_ext(uint64_t afsr_ext)
593 afsr_ext = afsr_ext;
595 #else /* lint */
597 ENTRY(set_afsr_ext)
598 set ASI_AFSR_EXT_VA, %o1
599 stxa %o0, [%o1]ASI_AFSR ! afsr_ext reg
600 membar #Sync
601 retl
603 SET_SIZE(set_afsr_ext)
605 #endif /* lint */
608 #if defined(lint)
610 * The CPU jumps here from the MMU exception handler if an ITLB parity
611 * error is detected and we are running on Panther.
613 * In this routine we collect diagnostic information and write it to our
614 * logout structure (if possible) and clear all ITLB entries that may have
615 * caused our parity trap.
616 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
617 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
618 * send two:
620 * %g2 - Contains the VA whose lookup in the ITLB caused the parity error
621 * %g3 - Contains the tlo_info field of the pn_tlb_logout logout struct,
622 * regardless of whether or not we actually used the logout struct.
624 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
625 * parameters to the data contained in the logout structure in order to
626 * determine whether the logout information is valid for this particular
627 * error or not.
629 void
630 itlb_parity_trap(void)
633 #else /* lint */
635 ENTRY_NP(itlb_parity_trap)
637 * Collect important information about the trap which will be
638 * used as a parameter to the TL0 handler.
640 wr %g0, ASI_IMMU, %asi
641 rdpr %tpc, %g2 ! VA that caused the IMMU trap
642 ldxa [MMU_TAG_ACCESS_EXT]%asi, %g3 ! read the trap VA page size
643 set PN_ITLB_PGSZ_MASK, %g4
644 and %g3, %g4, %g3
645 ldxa [MMU_TAG_ACCESS]%asi, %g4
646 set TAGREAD_CTX_MASK, %g5
647 and %g4, %g5, %g4
648 or %g4, %g3, %g3 ! 'or' in the trap context and
649 mov 1, %g4 ! add the IMMU flag to complete
650 sllx %g4, PN_TLO_INFO_IMMU_SHIFT, %g4
651 or %g4, %g3, %g3 ! the tlo_info field for logout
652 stxa %g0,[MMU_SFSR]%asi ! clear the SFSR
653 membar #Sync
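/*
 * In C terms, the tlo_info value assembled above is (sketch only):
 *
 *	tlo_info = (tag_access_ext & PN_ITLB_PGSZ_MASK) |
 *	    (tag_access & TAGREAD_CTX_MASK) |
 *	    (1ULL << PN_TLO_INFO_IMMU_SHIFT);
 */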
656 * at this point:
657 * %g2 - contains the VA whose lookup caused the trap
658 * %g3 - contains the tlo_info field
660 * Next, we calculate the TLB index value for the failing VA.
662 mov %g2, %g4 ! We need the ITLB index
663 set PN_ITLB_PGSZ_MASK, %g5
664 and %g3, %g5, %g5
665 srlx %g5, PN_ITLB_PGSZ_SHIFT, %g5
666 PN_GET_TLB_INDEX(%g4, %g5) ! %g4 has the index
667 sllx %g4, PN_TLB_ACC_IDX_SHIFT, %g4 ! shift the index into place
668 set PN_ITLB_T512, %g5
669 or %g4, %g5, %g4 ! and add in the TLB ID
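/*
 * The ITLB access index built above corresponds roughly to the following
 * (sketch only; pn_get_tlb_index() is the C-callable version of the
 * PN_GET_TLB_INDEX macro, defined later in this file):
 *
 *	pgsz  = (tlo_info & PN_ITLB_PGSZ_MASK) >> PN_ITLB_PGSZ_SHIFT;
 *	index = pn_get_tlb_index(va, pgsz);
 *	acc   = (index << PN_TLB_ACC_IDX_SHIFT) | PN_ITLB_T512;
 */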
672 * at this point:
673 * %g2 - contains the VA whose lookup caused the trap
674 * %g3 - contains the tlo_info field
675 * %g4 - contains the TLB access index value for the
676 * VA/PgSz in question
678 * Check to see if the logout structure is available.
680 set CHPR_TLB_LOGOUT, %g6
681 GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
682 set LOGOUT_INVALID_U32, %g6
683 sllx %g6, 32, %g6 ! if our logout structure is
684 set LOGOUT_INVALID_L32, %g5 ! unavailable or if it is
685 or %g5, %g6, %g5 ! already being used, then we
686 ldx [%g1 + PN_TLO_ADDR], %g6 ! don't collect any diagnostic
687 cmp %g6, %g5 ! information before clearing
688 bne itlb_parity_trap_1 ! and logging the error.
692 * Record the logout information. %g4 contains our index + TLB ID
693 * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD. %g1 contains
694 * the pointer to our logout struct.
696 stx %g3, [%g1 + PN_TLO_INFO]
697 stx %g2, [%g1 + PN_TLO_ADDR]
698 stx %g2, [%g1 + PN_TLO_PC] ! %tpc == fault addr for IMMU
700 add %g1, PN_TLO_ITLB_TTE, %g1 ! move up the pointer
702 ldxa [%g4]ASI_ITLB_ACCESS, %g5 ! read the data
703 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! store it away
704 ldxa [%g4]ASI_ITLB_TAGREAD, %g5 ! read the tag
705 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! store it away
707 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
708 or %g4, %g6, %g4
709 add %g1, CH_TLO_TTE_SIZE, %g1 ! move up the pointer
711 ldxa [%g4]ASI_ITLB_ACCESS, %g5 ! read the data
712 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! store it away
713 ldxa [%g4]ASI_ITLB_TAGREAD, %g5 ! read the tag
714 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! store it away
716 andn %g4, %g6, %g4 ! back to way 0
718 itlb_parity_trap_1:
720 * at this point:
721 * %g2 - contains the VA whose lookup caused the trap
722 * %g3 - contains the tlo_info field
723 * %g4 - contains the TLB access index value for the
724 * VA/PgSz in question
726 * Here we will clear the errors from the TLB.
728 set MMU_TAG_ACCESS, %g5 ! We write a TTE tag value of
729 stxa %g0, [%g5]ASI_IMMU ! 0 as it will be invalid.
730 stxa %g0, [%g4]ASI_ITLB_ACCESS ! Write the data and tag
731 membar #Sync
733 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
734 or %g4, %g6, %g4
736 stxa %g0, [%g4]ASI_ITLB_ACCESS ! Write same data and tag
737 membar #Sync
739 sethi %hi(FLUSH_ADDR), %g6 ! PRM says we need to issue a
740 flush %g6 ! flush after writing MMU regs
743 * at this point:
744 * %g2 - contains the VA whose lookup caused the trap
745 * %g3 - contains the tlo_info field
747 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
748 * already at PIL 15. */
749 set cpu_tlb_parity_error, %g1
750 rdpr %pil, %g4
751 cmp %g4, PIL_14
752 movl %icc, PIL_14, %g4
753 ba sys_trap
755 SET_SIZE(itlb_parity_trap)
757 #endif /* lint */
759 #if defined(lint)
761 * The CPU jumps here from the MMU exception handler if a DTLB parity
762 * error is detected and we are running on Panther.
764 * In this routine we collect diagnostic information and write it to our
765 * logout structure (if possible) and clear all DTLB entries that may have
766 * caused our parity trap.
767 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
768 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
769 * send two:
771 * %g2 - Contains the VA whose lookup in the DTLB caused the parity error
772 * %g3 - Contains the tlo_info field of the pn_tlb_logout logout struct,
773 * regardless of whether or not we actually used the logout struct.
775 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
776 * parameters to the data contained in the logout structure in order to
777 * determine whether the logout information is valid for this particular
778 * error or not.
780 void
781 dtlb_parity_trap(void)
784 #else /* lint */
786 ENTRY_NP(dtlb_parity_trap)
788 * Collect important information about the trap which will be
789 * used as a parameter to the TL0 handler.
791 wr %g0, ASI_DMMU, %asi
792 ldxa [MMU_SFAR]%asi, %g2 ! VA that caused the DMMU trap
793 ldxa [MMU_TAG_ACCESS_EXT]%asi, %g3 ! read the trap VA page sizes
794 set PN_DTLB_PGSZ_MASK, %g4
795 and %g3, %g4, %g3
796 ldxa [MMU_TAG_ACCESS]%asi, %g4
797 set TAGREAD_CTX_MASK, %g5 ! 'or' in the trap context
798 and %g4, %g5, %g4 ! to complete the tlo_info
799 or %g4, %g3, %g3 ! field for logout
800 stxa %g0,[MMU_SFSR]%asi ! clear the SFSR
801 membar #Sync
804 * at this point:
805 * %g2 - contains the VA whose lookup caused the trap
806 * %g3 - contains the tlo_info field
808 * Calculate the TLB index values for the failing VA. Since the T512
809 * TLBs can be configured for different page sizes, we need to find
810 * the index into each one separately.
812 mov %g2, %g4 ! First we get the DTLB_0 index
813 set PN_DTLB_PGSZ0_MASK, %g5
814 and %g3, %g5, %g5
815 srlx %g5, PN_DTLB_PGSZ0_SHIFT, %g5
816 PN_GET_TLB_INDEX(%g4, %g5) ! %g4 has the DTLB_0 index
817 sllx %g4, PN_TLB_ACC_IDX_SHIFT, %g4 ! shift the index into place
818 set PN_DTLB_T512_0, %g5
819 or %g4, %g5, %g4 ! and add in the TLB ID
821 mov %g2, %g7 ! Next we get the DTLB_1 index
822 set PN_DTLB_PGSZ1_MASK, %g5
823 and %g3, %g5, %g5
824 srlx %g5, PN_DTLB_PGSZ1_SHIFT, %g5
825 PN_GET_TLB_INDEX(%g7, %g5) ! %g7 has the DTLB_1 index
826 sllx %g7, PN_TLB_ACC_IDX_SHIFT, %g7 ! shift the index into place
827 set PN_DTLB_T512_1, %g5
828 or %g7, %g5, %g7 ! and add in the TLB ID
831 * at this point:
832 * %g2 - contains the VA whose lookup caused the trap
833 * %g3 - contains the tlo_info field
834 * %g4 - contains the T512_0 access index value for the
835 * VA/PgSz in question
836 * %g7 - contains the T512_1 access index value for the
837 * VA/PgSz in question
839 * If this trap happened at TL>0, then we don't want to mess
840 * with the normal logout struct since that could cause a TLB
841 * miss.
843 rdpr %tl, %g6 ! read current trap level
844 cmp %g6, 1 ! skip over the tl>1 code
845 ble dtlb_parity_trap_1 ! if TL <= 1.
849 * If we are here, then the trap happened at TL>1. Simply
850 * update our tlo_info field and then skip to the TLB flush
851 * code.
853 mov 1, %g6
854 sllx %g6, PN_TLO_INFO_TL1_SHIFT, %g6
855 or %g6, %g3, %g3
856 ba dtlb_parity_trap_2
859 dtlb_parity_trap_1:
861 * at this point:
862 * %g2 - contains the VA whose lookup caused the trap
863 * %g3 - contains the tlo_info field
864 * %g4 - contains the T512_0 access index value for the
865 * VA/PgSz in question
866 * %g7 - contains the T512_1 access index value for the
867 * VA/PgSz in question
869 * Check to see if the logout structure is available.
871 set CHPR_TLB_LOGOUT, %g6
872 GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
873 set LOGOUT_INVALID_U32, %g6
874 sllx %g6, 32, %g6 ! if our logout structure is
875 set LOGOUT_INVALID_L32, %g5 ! unavailable or if it is
876 or %g5, %g6, %g5 ! already being used, then we
877 ldx [%g1 + PN_TLO_ADDR], %g6 ! don't collect any diagnostic
878 cmp %g6, %g5 ! information before clearing
879 bne dtlb_parity_trap_2 ! and logging the error.
883 * Record the logout information. %g4 contains our DTLB_0
884 * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID
885 * both of which will be used for ASI_DTLB_ACCESS and
886 * ASI_DTLB_TAGREAD. %g1 contains the pointer to our logout
887 * struct.
889 stx %g3, [%g1 + PN_TLO_INFO]
890 stx %g2, [%g1 + PN_TLO_ADDR]
891 rdpr %tpc, %g5
892 stx %g5, [%g1 + PN_TLO_PC]
894 add %g1, PN_TLO_DTLB_TTE, %g1 ! move up the pointer
896 ldxa [%g4]ASI_DTLB_ACCESS, %g5 ! read the data from DTLB_0
897 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! way 0 and store it away
898 ldxa [%g4]ASI_DTLB_TAGREAD, %g5 ! read the tag from DTLB_0
899 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! way 0 and store it away
901 ldxa [%g7]ASI_DTLB_ACCESS, %g5 ! now repeat for DTLB_1 way 0
902 stx %g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
903 ldxa [%g7]ASI_DTLB_TAGREAD, %g5
904 stx %g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]
906 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
907 or %g4, %g6, %g4 ! of each TLB.
908 or %g7, %g6, %g7
909 add %g1, CH_TLO_TTE_SIZE, %g1 ! move up the pointer
911 ldxa [%g4]ASI_DTLB_ACCESS, %g5 ! read the data from DTLB_0
912 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! way 1 and store it away
913 ldxa [%g4]ASI_DTLB_TAGREAD, %g5 ! read the tag from DTLB_0
914 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! way 1 and store it away
916 ldxa [%g7]ASI_DTLB_ACCESS, %g5 ! now repeat for DTLB_1 way 1
917 stx %g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
918 ldxa [%g7]ASI_DTLB_TAGREAD, %g5
919 stx %g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]
921 andn %g4, %g6, %g4 ! back to way 0
922 andn %g7, %g6, %g7 ! back to way 0
924 dtlb_parity_trap_2:
926 * at this point:
927 * %g2 - contains the VA whose lookup caused the trap
928 * %g3 - contains the tlo_info field
929 * %g4 - contains the T512_0 access index value for the
930 * VA/PgSz in question
931 * %g7 - contains the T512_1 access index value for the
932 * VA/PgSz in question
934 * Here we will clear the errors from the DTLB.
936 set MMU_TAG_ACCESS, %g5 ! We write a TTE tag value of
937 stxa %g0, [%g5]ASI_DMMU ! 0 as it will be invalid.
938 stxa %g0, [%g4]ASI_DTLB_ACCESS ! Write the data and tag.
939 stxa %g0, [%g7]ASI_DTLB_ACCESS ! Now repeat for DTLB_1 way 0
940 membar #Sync
942 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
943 or %g4, %g6, %g4
944 or %g7, %g6, %g7
946 stxa %g0, [%g4]ASI_DTLB_ACCESS ! Write same data and tag.
947 stxa %g0, [%g7]ASI_DTLB_ACCESS ! Now repeat for DTLB_1 way 1
948 membar #Sync
950 sethi %hi(FLUSH_ADDR), %g6 ! PRM says we need to issue a
951 flush %g6 ! flush after writing MMU regs
954 * at this point:
955 * %g2 - contains the VA whose lookup caused the trap
956 * %g3 - contains the tlo_info field
958 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
959 * already at PIL 15. We do this even for TL>1 traps since
960 * those will lead to a system panic.
962 set cpu_tlb_parity_error, %g1
963 rdpr %pil, %g4
964 cmp %g4, PIL_14
965 movl %icc, PIL_14, %g4
966 ba sys_trap
968 SET_SIZE(dtlb_parity_trap)
970 #endif /* lint */
973 #if defined(lint)
975 * Calculates the Panther TLB index based on a virtual address and page size
977 * Register usage:
978 * %o0 - virtual address whose index we want
979 * %o1 - Page Size of the TLB in question as encoded in the
980 * ASI_[D|I]MMU_TAG_ACCESS_EXT register.
982 uint64_t
983 pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
985 return ((va + pg_sz)-(va + pg_sz));
987 #else /* lint */
988 ENTRY(pn_get_tlb_index)
990 PN_GET_TLB_INDEX(%o0, %o1)
992 retl
994 SET_SIZE(pn_get_tlb_index)
995 #endif /* lint */
998 #if defined(lint)
1000 * For Panther CPUs we need to flush the IPB after any I$ or D$
1001 * parity errors are detected.
1003 void
1004 flush_ipb(void)
1005 { return; }
1007 #else /* lint */
1009 ENTRY(flush_ipb)
1010 clr %o0
1012 flush_ipb_1:
1013 stxa %g0, [%o0]ASI_IPB_TAG
1014 membar #Sync
1015 cmp %o0, PN_IPB_TAG_ADDR_MAX
1016 blt flush_ipb_1
1017 add %o0, PN_IPB_TAG_ADDR_LINESIZE, %o0
1019 sethi %hi(FLUSH_ADDR), %o0
1020 flush %o0
1021 retl
1023 SET_SIZE(flush_ipb)
1025 #endif /* lint */
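/*
 * A C-style sketch of flush_ipb() above.  Illustrative only; ipb_tag_write()
 * is a stand-in for the stxa to ASI_IPB_TAG:
 *
 *	void
 *	flush_ipb_c(void)
 *	{
 *		uint64_t addr;
 *
 *		// invalidate every IPB tag, one line at a time
 *		for (addr = 0; addr <= PN_IPB_TAG_ADDR_MAX;
 *		    addr += PN_IPB_TAG_ADDR_LINESIZE)
 *			ipb_tag_write(addr, 0);
 *	}
 */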