/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/pgtable.h>
#include <asm/spr-regs.h>
        .section        .text..tlbmiss

        .globl          __entry_insn_mmu_miss
__entry_insn_mmu_miss:

        .globl          __entry_insn_mmu_exception
__entry_insn_mmu_exception:

        .globl          __entry_data_mmu_miss
__entry_data_mmu_miss:

        .globl          __entry_data_mmu_exception
__entry_data_mmu_exception:
###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# GR29 - faulting address
#
###############################################################################
        .type           __tlb_kernel_fault,@function
__tlb_kernel_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p         %hi(__break_tlb_miss_return_break),gr30
        setlo           %lo(__break_tlb_miss_return_break),gr30
        subcc           gr31,gr30,gr0,icc0
        beq             icc0,#0,__tlb_kernel_fault_sstep
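        # (matching the saved return address against the miss-return
        #  breakpoint is how we detect that the faulting access was being
        #  single-stepped, and hence that stepping needs re-arming on the
        #  way out)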
        movgs           gr29,scr2                       /* save EAR0 value */
        sethi.p         %hi(__kernel_current_task),gr29
        setlo           %lo(__kernel_current_task),gr29
        ldi.p           @(gr29,#0),gr29                 /* restore GR29 */
        bra             __entry_kernel_handle_mmu_fault
        # we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
        sethi.p         %hi(__break_tlb_miss_real_return_info),gr30
        setlo           %lo(__break_tlb_miss_real_return_info),gr30

        movgs           gr29,scr2                       /* save EAR0 value */
        sethi.p         %hi(__kernel_current_task),gr29
        setlo           %lo(__kernel_current_task),gr29
        ldi.p           @(gr29,#0),gr29                 /* restore GR29 */
        bra             __entry_kernel_handle_mmu_fault_sstep

        .size           __tlb_kernel_fault, .-__tlb_kernel_fault
###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# GR28 - faulting address
#
###############################################################################
        .type           __tlb_user_fault,@function
__tlb_user_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p         %hi(__break_tlb_miss_return_break),gr30
        setlo           %lo(__break_tlb_miss_return_break),gr30
        subcc           gr31,gr30,gr0,icc0
        beq             icc0,#0,__tlb_user_fault_sstep
        bra             __entry_uspace_handle_mmu_fault

        # we've got to re-enable single-stepping
__tlb_user_fault_sstep:
        sethi.p         %hi(__break_tlb_miss_real_return_info),gr30
        setlo           %lo(__break_tlb_miss_real_return_info),gr30
        bra             __entry_uspace_handle_mmu_fault_sstep

        .size           __tlb_user_fault, .-__tlb_user_fault
###############################################################################
#
# Kernel instruction TLB miss handler
# GR1   - kernel stack pointer
# GR28  - saved exception frame pointer
# GR29  - faulting address
# SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
# DAMR3 - mapped page directory
# DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
        .globl          __entry_kernel_insn_tlb_miss
        .type           __entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
        movsg           ccr,gr30                        /* save CCR */

        # see if the cached page table mapping is appropriate
        srlicc.p        gr31,#26,gr0,icc0
        srli.p          gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__itlb_k_PTD_miss
__itlb_k_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
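        # (sizing sanity-check: 4096 PTEs x 16Kb pages = 64Mb, exactly the
        #  range selected by EAR0[31:26]; the "srli #12" above leaves
        #  EAR0[25:14] premultiplied by 4 - the PTE size assumed here - so
        #  once masked it serves directly as a byte offset into the PTD)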
        ldi             @(gr31,#0),gr30                 /* fetch the PTE */
        andicc          gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p           gr30,#_PAGE_ACCESSED,gr30
        beq             icc0,#0,__tlb_kernel_fault      /* jump if PTE invalid */
        sti.p           gr30,@(gr31,#0)                 /* update the PTE */
        andi            gr30,#~_PAGE_ACCESSED,gr30
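        # (note that _PAGE_ACCESSED is set in the in-memory PTE but cleared
        #  again from the register copy - presumably because that bit
        #  position means something different in the xAMPRx format this
        #  value is about to be loaded into)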
        # we're using IAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check DAMR1 lest we cause a multiple-DAT-hit exception
        # - IAMPR1 has no WP bit, and we mustn't lose WP information
        andicc          gr31,#xAMPRx_V,gr0,icc0
        setlos.p        0xfffff000,gr31
        beq             icc0,#0,__itlb_k_nopunt         /* punt not required */
        movgs           gr31,tplr                       /* set TPLR.CXN */
        tlbpr           gr31,gr0,#4,#0                  /* delete matches from TLB, IAMR1, DAMR1 */

        ori             gr31,#xAMPRx_V,gr31             /* entry was invalidated by tlbpr #4 */
        movsg           iamlr1,gr31                     /* set TPLR.CXN */
        tlbpr           gr31,gr0,#2,#0                  /* save to the TLB */
        movsg           tpxr,gr31                       /* check the TLB write error flag */
        andicc.p        gr31,#TPXR_E,gr0,icc0
        setlos          #0xfffff000,gr31
        bne             icc0,#0,__tlb_kernel_fault
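        # (TPXR.E reports a TLB write error: if the tlbpr store above
        #  failed, abandon the fast path and take the full fault route)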
__itlb_k_nopunt:

        # assemble the new TLB entry
        movgs           gr29,iamlr1                     /* xAMLR = address | context number */
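        # (an xAMLR/xAMPR pair describes one mapping: the AMLR side carries
        #  the virtual address tag and the context number, the AMPR side
        #  the physical address and protection bits derived from the PTE)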
        # return, restoring registers
        sethi.p         %hi(__kernel_current_task),gr29
        setlo           %lo(__kernel_current_task),gr29
        beq             icc0,#3,0                       /* prevent icache prefetch */
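        # (the branch above is never actually taken; it exists only to stop
        #  instruction prefetch from running on past the end of the handler)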
__itlb_k_PTD_miss:
        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
        srli            gr29,#26,gr31                   /* calculate PGE offset */
        slli            gr31,#8,gr31                    /* and clear bottom bits */
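        # (the shift pair computes (EAR0 >> 26) << 8, i.e. 256 bytes per
        #  PGE slot; 64 slots x 256 bytes = 16Kb, so the whole PGD fits in
        #  one page)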
        ld              @(gr31,gr30),gr30               /* access the PGE */

        andicc.p        gr30,#_PAGE_PRESENT,gr0,icc0
        andicc          gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p           gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq             icc0,#0,__tlb_kernel_fault      /* jump if PGE not present */
        bne             icc1,#0,__itlb_k_bigpage

        # we can now resume normal service
        srli.p          gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra             __itlb_k_PTD_mapped

        .size           __entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss
###############################################################################
#
# Kernel data TLB miss handler
# GR1   - kernel stack pointer
# GR28  - saved exception frame pointer
# GR29  - faulting address
# SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
# DAMR3 - mapped page directory
# DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
        .globl          __entry_kernel_data_tlb_miss
        .type           __entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
        movsg           ccr,gr30                        /* save CCR */

        # see if the cached page table mapping is appropriate
        srlicc.p        gr31,#26,gr0,icc0
        srli.p          gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__dtlb_k_PTD_miss
__dtlb_k_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        ldi             @(gr31,#0),gr30                 /* fetch the PTE */
        andicc          gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p           gr30,#_PAGE_ACCESSED,gr30
        beq             icc0,#0,__tlb_kernel_fault      /* jump if PTE invalid */
        sti.p           gr30,@(gr31,#0)                 /* update the PTE */
        andi            gr30,#~_PAGE_ACCESSED,gr30
        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check IAMR1 lest we cause a multiple-DAT-hit exception
        andicc          gr31,#xAMPRx_V,gr0,icc0
        setlos.p        0xfffff000,gr31
        beq             icc0,#0,__dtlb_k_nopunt         /* punt not required */
        movgs           gr31,tplr                       /* set TPLR.CXN */
        tlbpr           gr31,gr0,#4,#0                  /* delete matches from TLB, IAMR1, DAMR1 */

        ori             gr31,#xAMPRx_V,gr31             /* entry was invalidated by tlbpr #4 */
        movsg           damlr1,gr31                     /* set TPLR.CXN */
        tlbpr           gr31,gr0,#2,#0                  /* save to the TLB */
        movsg           tpxr,gr31                       /* check the TLB write error flag */
        andicc.p        gr31,#TPXR_E,gr0,icc0
        setlos          #0xfffff000,gr31
        bne             icc0,#0,__tlb_kernel_fault
__dtlb_k_nopunt:

        # assemble the new TLB entry
        movgs           gr29,damlr1                     /* xAMLR = address | context number */
        # return, restoring registers
        sethi.p         %hi(__kernel_current_task),gr29
        setlo           %lo(__kernel_current_task),gr29
        beq             icc0,#3,0                       /* prevent icache prefetch */
__dtlb_k_PTD_miss:
        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
        srli            gr29,#26,gr31                   /* calculate PGE offset */
        slli            gr31,#8,gr31                    /* and clear bottom bits */
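        # (same PGE-offset arithmetic as the insn-side handler above; here
        #  the located PTD gets mapped through DAMR5, with SCR1 recording
        #  the 64Mb range it covers)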
        ld              @(gr31,gr30),gr30               /* access the PGE */

        andicc.p        gr30,#_PAGE_PRESENT,gr0,icc0
        andicc          gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p           gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq             icc0,#0,__tlb_kernel_fault      /* jump if PGE not present */
        bne             icc1,#0,__dtlb_k_bigpage

        # we can now resume normal service
        srli.p          gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra             __dtlb_k_PTD_mapped

        .size           __entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss
###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# GR28  - faulting address
# SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
# DAMR3 - mapped page directory
# DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
        .globl          __entry_user_insn_tlb_miss
        .type           __entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
        movsg           ccr,gr30                        /* save CCR */

        # see if the cached page table mapping is appropriate
        srlicc.p        gr31,#26,gr0,icc0
        srli.p          gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__itlb_u_PTD_miss
__itlb_u_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        ldi             @(gr31,#0),gr30                 /* fetch the PTE */
        andicc          gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p           gr30,#_PAGE_ACCESSED,gr30
        beq             icc0,#0,__tlb_user_fault        /* jump if PTE invalid */
        sti.p           gr30,@(gr31,#0)                 /* update the PTE */
        andi            gr30,#~_PAGE_ACCESSED,gr30
        # we're using IAMR1/DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
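        # (a userspace mapping may be shadowed on either the insn or the
        #  data side, so both IAMR1 and DAMR1 have to be considered when
        #  punting, lest a stale copy provoke the multiple-DAT-hit
        #  exception described for the kernel handlers above)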
        andicc          gr31,#xAMPRx_V,gr0,icc0
        setlos.p        0xfffff000,gr31
        beq             icc0,#0,__itlb_u_nopunt         /* punt not required */

        movsg           damlr1,gr31                     /* set TPLR.CXN */
        tlbpr           gr31,gr0,#2,#0                  /* save to the TLB */
        movsg           tpxr,gr31                       /* check the TLB write error flag */
        andicc.p        gr31,#TPXR_E,gr0,icc0
        setlos          #0xfffff000,gr31
        bne             icc0,#0,__tlb_user_fault
__itlb_u_nopunt:

        # assemble the new TLB entry
        movgs           gr28,iamlr1                     /* xAMLR = address | context number */
        # return, restoring registers
        beq             icc0,#3,0                       /* prevent icache prefetch */
__itlb_u_PTD_miss:
        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
        srli            gr28,#26,gr31                   /* calculate PGE offset */
        slli            gr31,#8,gr31                    /* and clear bottom bits */
        ld              @(gr31,gr30),gr30               /* access the PGE */

        andicc.p        gr30,#_PAGE_PRESENT,gr0,icc0
        andicc          gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p           gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq             icc0,#0,__tlb_user_fault        /* jump if PGE not present */
        bne             icc1,#0,__itlb_u_bigpage

        # we can now resume normal service
        srli.p          gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra             __itlb_u_PTD_mapped

        .size           __entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss
###############################################################################
#
# Userspace data TLB miss handler
# GR28  - faulting address
# SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
# DAMR3 - mapped page directory
# DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
        .globl          __entry_user_data_tlb_miss
        .type           __entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
        movsg           ccr,gr30                        /* save CCR */

        # see if the cached page table mapping is appropriate
        srlicc.p        gr31,#26,gr0,icc0
        srli.p          gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__dtlb_u_PTD_miss
__dtlb_u_PTD_mapped:
        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        ldi             @(gr31,#0),gr30                 /* fetch the PTE */
        andicc          gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p           gr30,#_PAGE_ACCESSED,gr30
        beq             icc0,#0,__tlb_user_fault        /* jump if PTE invalid */
        sti.p           gr30,@(gr31,#0)                 /* update the PTE */
        andi            gr30,#~_PAGE_ACCESSED,gr30
        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        andicc          gr31,#xAMPRx_V,gr0,icc0
        setlos.p        0xfffff000,gr31
        beq             icc0,#0,__dtlb_u_nopunt         /* punt not required */
        movsg           damlr1,gr31                     /* set TPLR.CXN */
        tlbpr           gr31,gr0,#2,#0                  /* save to the TLB */
        movsg           tpxr,gr31                       /* check the TLB write error flag */
        andicc.p        gr31,#TPXR_E,gr0,icc0
        setlos          #0xfffff000,gr31
        bne             icc0,#0,__tlb_user_fault
__dtlb_u_nopunt:

        # assemble the new TLB entry
        movgs           gr28,damlr1                     /* xAMLR = address | context number */
        # return, restoring registers
        beq             icc0,#3,0                       /* prevent icache prefetch */
__dtlb_u_PTD_miss:
        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - first of all, check the insn PGE cache - we may well get a hit there
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
        movsg           scr0,gr31                       /* consult the insn-PGE-cache key */

        srlicc          gr31,#26,gr0,icc0
        srli            gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__dtlb_u_iPGE_miss

        # what we're looking for is covered by the insn-PGE-cache
        bra             __dtlb_u_using_iPTD
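        # (this shortcut pays off whenever a data fault lands in the 64Mb
        #  range already covered by the insn-side PTD mapping - DAMR4, keyed
        #  by SCR0 - saving a full walk of the PGD)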
__dtlb_u_iPGE_miss:
        srli            gr28,#26,gr31                   /* calculate PGE offset */
        slli            gr31,#8,gr31                    /* and clear bottom bits */

        ld              @(gr31,gr30),gr30               /* access the PGE */

        andicc.p        gr30,#_PAGE_PRESENT,gr0,icc0
        andicc          gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p           gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq             icc0,#0,__tlb_user_fault        /* jump if PGE not present */
        bne             icc1,#0,__dtlb_u_bigpage

        # we can now resume normal service
        srli.p          gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra             __dtlb_u_PTD_mapped

        .size           __entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss