/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/pgtable.h>
#include <asm/spr-regs.h>
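
# Overview of the cached state used below (the per-handler banners give the
# authoritative register usage): DAMR3 maps the page directory; DAMR4/DAMR5
# map the page table (PTD) most recently walked by the insn/data handlers,
# with SCR0/SCR1 recording the virtual range each cached PTD covers; and
# IAMR1/DAMR1 act as a software-managed spare TLB entry that is punted into
# the real TLB when it has to be replaced.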
	.section	.text..tlbmiss

	.globl		__entry_insn_mmu_miss
__entry_insn_mmu_miss:

	.globl		__entry_insn_mmu_exception
__entry_insn_mmu_exception:

	.globl		__entry_data_mmu_miss
__entry_data_mmu_miss:

	.globl		__entry_data_mmu_exception
__entry_data_mmu_exception:
###############################################################################
# handle a lookup failure of one sort or another in a kernel TLB handler
# GR29 - faulting address
###############################################################################
	.type		__tlb_kernel_fault,@function
__tlb_kernel_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30

	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_kernel_fault_sstep
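	# (GR31 is expected to hold the address the miss handler will return to,
	# set up by the miss entry code; if it matches the single-step break
	# trampoline, the sstep variant of the fault path must be taken so that
	# single-stepping is re-armed on the way out.)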
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault
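	# (Note: the faulting address is parked in SCR2 and GR29 is reloaded with
	# the current task pointer before handing over to the common kernel MMU
	# fault path, which presumably picks the address back up from SCR2.)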
	# we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30

	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault_sstep

	.size		__tlb_kernel_fault, .-__tlb_kernel_fault
###############################################################################
# handle a lookup failure of one sort or another in a user TLB handler
# GR28 - faulting address
###############################################################################
	.type		__tlb_user_fault,@function
__tlb_user_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30

	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_user_fault_sstep

	bra		__entry_uspace_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_user_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30

	bra		__entry_uspace_handle_mmu_fault_sstep

	.size		__tlb_user_fault, .-__tlb_user_fault
###############################################################################
# Kernel instruction TLB miss handler
# GR1 - kernel stack pointer
# GR28 - saved exception frame pointer
# GR29 - faulting address
# SCR0 - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
# DAMR3 - mapped page directory
# DAMR4 - mapped page table as matched by SCR0
###############################################################################
	.globl		__entry_kernel_insn_tlb_miss
	.type		__entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30

	movsg		ccr,gr30			/* save CCR */
	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_k_PTD_miss

	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
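	# (Address decomposition, for reference: with 4096 PTEs per PTD, the page
	# size implied by the EAR0[25:14] index is 16KB, so one PTD covers
	# 4096 * 16KB = 64MB and the PGD's 64 PGEs cover the full 4GB:
	#	PGE index   = EAR0[31:26]	-> 1 of 64
	#	PTE index   = EAR0[25:14]	-> 1 of 4096
	#	page offset = EAR0[13:0]	-> 16KB page
	# The srli by #12 above leaves EAR0[25:14] in bit positions [13:2], i.e.
	# already scaled for 4-byte PTEs once the surrounding bits are masked.)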
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30
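	# (The PTE is tested for _PAGE_PRESENT, written back with _PAGE_ACCESSED
	# set so the accessed-bit bookkeeping stays up to date, and the accessed
	# bit is then cleared again in the in-register copy that is used to
	# assemble the new TLB entry below.)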
	# we're using IAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check DAMR1 lest we cause a multiple-DAT-hit exception
	# - IAMPR1 has no WP bit, and we mustn't lose WP information
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_k_nopunt		/* punt not required */

	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movsg		iamlr1,gr31			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault
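	# (Punt sequence, in outline: the current IAMR1/DAMR1 entry is pushed
	# into the real TLB - tlbpr #4 first deletes any matching entries, then
	# tlbpr #2 saves the staged entry - and TPXR.E is checked afterwards so a
	# TLB write error falls back to the full fault path.)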
	# assemble the new TLB entry
	movgs		gr29,iamlr1			/* xAMLR = address | context number */

	# return, restoring registers
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	beq		icc0,#3,0			/* prevent icache prefetch */
	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	ld		@(gr31,gr30),gr30		/* access the PGE */
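	# (PGE offset arithmetic: the PGD index is EAR0[31:26]; shifting it left
	# by 8 scales it by 256, so each PGE slot appears to be 256 bytes wide
	# and the PGD's 64 slots fill one 16KB page.  The load above then indexes
	# the page directory mapping, per the DAMLR3 note, by that byte offset.)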
	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	bne		icc1,#0,__itlb_k_bigpage
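	# (The PGE value is ORed with the xAMPRx_* bits needed to map the PTD
	# itself as a 16Kb entry through the cached-PTD DAMR, and the covered
	# range is recorded (SCR0 for the insn side, per the banner above); a PGE
	# with the SS bit set describes a large page and is handled on the
	# __itlb_k_bigpage path instead.)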
	# we can now resume normal service
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_k_PTD_mapped

	.size		__entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss
###############################################################################
# Kernel data TLB miss handler
# GR1 - kernel stack pointer
# GR28 - saved exception frame pointer
# GR29 - faulting address
# SCR1 - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
# DAMR3 - mapped page directory
# DAMR5 - mapped page table as matched by SCR1
###############################################################################
	.globl		__entry_kernel_data_tlb_miss
	.type		__entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	movsg		ccr,gr30			/* save CCR */

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_k_PTD_miss

	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30
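	# (This mirrors the PTE fetch/accessed-bit sequence in the insn handler
	# above; the difference is that the cached PTD is reached via
	# DAMLR5/SCR1 rather than DAMLR4/SCR0.)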
	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check IAMR1 lest we cause a multiple-DAT-hit exception
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_k_nopunt		/* punt not required */

	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movsg		damlr1,gr31			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

	# assemble the new TLB entry
	movgs		gr29,iamlr1			/* xAMLR = address | context number */

	# return, restoring registers
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	beq		icc0,#3,0			/* prevent icache prefetch */
	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	bne		icc1,#0,__dtlb_k_bigpage

	# we can now resume normal service
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_k_PTD_mapped

	.size		__entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss
###############################################################################
# Userspace instruction TLB miss handler (with PGE prediction)
# GR28 - faulting address
# SCR0 - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
# DAMR3 - mapped page directory
# DAMR4 - mapped page table as matched by SCR0
###############################################################################
	.globl		__entry_user_insn_tlb_miss
	.type		__entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30

	movsg		ccr,gr30			/* save CCR */
	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_u_PTD_miss

	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30
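	# (Same walk as the kernel insn handler above, but the faulting address
	# arrives in GR28 rather than GR29 and lookup failures are routed through
	# __tlb_user_fault to the userspace MMU fault path.)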
	# we're using IAMR1/DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_u_nopunt		/* punt not required */

	movsg		damlr1,gr31			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

	# assemble the new TLB entry
	movgs		gr28,iamlr1			/* xAMLR = address | context number */

	# return, restoring registers
	beq		icc0,#3,0			/* prevent icache prefetch */
	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	bne		icc1,#0,__itlb_u_bigpage

	# we can now resume normal service
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_u_PTD_mapped

	.size		__entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss
###############################################################################
# Userspace data TLB miss handler
# GR28 - faulting address
# SCR1 - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
# DAMR3 - mapped page directory
# DAMR5 - mapped page table as matched by SCR1
###############################################################################
	.globl		__entry_user_data_tlb_miss
	.type		__entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30

	movsg		ccr,gr30			/* save CCR */
	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_PTD_miss

	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30
	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_u_nopunt		/* punt not required */

	movsg		damlr1,gr31			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

	# assemble the new TLB entry
	movgs		gr28,iamlr1			/* xAMLR = address | context number */

	# return, restoring registers
	beq		icc0,#3,0			/* prevent icache prefetch */
	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - first of all, check the insn PGE cache - we may well get a hit there
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
	movsg		scr0,gr31			/* consult the insn-PGE-cache key */

	srlicc		gr31,#26,gr0,icc0
	srli		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_iPGE_miss

	# what we're looking for is covered by the insn-PGE-cache
	bra		__dtlb_u_using_iPTD
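	# (Unlike the other handlers, the userspace data handler first consults
	# SCR0 - the coverage key for the PTD cached by the insn-side handler -
	# since data accesses often fall within a range the insn side has already
	# mapped; on a hit, that insn-side PTD is used directly via
	# __dtlb_u_using_iPTD instead of walking the PGD again.)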
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	bne		icc1,#0,__dtlb_u_bigpage

	# we can now resume normal service
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_u_PTD_mapped

	.size		__entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss