/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
37 #include <arch/mm/tlb.h>
38 #include <arch/mm/page.h>
39 #include <arch/mm/vhpt.h>
40 #include <arch/barrier.h>
41 #include <arch/interrupt.h>
42 #include <arch/pal/pal.h>
48 #include <interrupt.h>
50 /** Invalidate all TLB entries. */
51 void tlb_invalidate_all(void)
55 __u32 count1
, count2
, stride1
, stride2
;
59 adr
= PAL_PTCE_INFO_BASE();
60 count1
= PAL_PTCE_INFO_COUNT1();
61 count2
= PAL_PTCE_INFO_COUNT2();
62 stride1
= PAL_PTCE_INFO_STRIDE1();
63 stride2
= PAL_PTCE_INFO_STRIDE2();
65 ipl
= interrupts_disable();
67 for(i
= 0; i
< count1
; i
++) {
68 for(j
= 0; j
< count2
; j
++) {
79 interrupts_restore(ipl
);
84 vhpt_invalidate_all();
88 /** Invalidate entries belonging to an address space.
90 * @param asid Address space identifier.
92 void tlb_invalidate_asid(asid_t asid
)
98 void tlb_invalidate_pages(asid_t asid
, __address page
, count_t cnt
)
101 bool restore_rr
= false;
108 rr
.word
= rr_read(VA2VRN(va
));
109 if ((restore_rr
= (rr
.map
.rid
!= ASID2RID(asid
, VA2VRN(va
))))) {
111 * The selected region register does not contain required RID.
112 * Save the old content of the register and replace the RID.
117 rr0
.map
.rid
= ASID2RID(asid
, VA2VRN(va
));
118 rr_write(VA2VRN(va
), rr0
.word
);
133 /*cnt=((cnt-1)/4)+1;*/
137 case 2: /*cnt 16-63*/
138 /*cnt=((cnt-1)/16)+1;*/
142 case 3: /*cnt 64-255*/
143 /*cnt=((cnt-1)/64)+1;*/
147 case 4: /*cnt 256-1023*/
148 /*cnt=((cnt-1)/256)+1;*/
152 case 5: /*cnt 1024-4095*/
153 /*cnt=((cnt-1)/1024)+1;*/
157 case 6: /*cnt 4096-16383*/
158 /*cnt=((cnt-1)/4096)+1;*/
162 case 7: /*cnt 16384-65535*/
163 case 8: /*cnt 65536-(256K-1)*/
164 /*cnt=((cnt-1)/16384)+1;*/
169 /*cnt=((cnt-1)/(16384*16))+1;*/
175 for(; va
<(page
+cnt
*(PAGE_SIZE
)); va
+= (1<<ps
)) {
179 : "r" (va
), "r" (ps
<<2)
186 rr_write(VA2VRN(va
), rr
.word
);
192 /** Insert data into data translation cache.
194 * @param va Virtual page address.
195 * @param asid Address space identifier.
196 * @param entry The rest of TLB entry as required by TLB insertion format.
198 void dtc_mapping_insert(__address va
, asid_t asid
, tlb_entry_t entry
)
200 tc_mapping_insert(va
, asid
, entry
, true);
203 /** Insert data into instruction translation cache.
205 * @param va Virtual page address.
206 * @param asid Address space identifier.
207 * @param entry The rest of TLB entry as required by TLB insertion format.
209 void itc_mapping_insert(__address va
, asid_t asid
, tlb_entry_t entry
)
211 tc_mapping_insert(va
, asid
, entry
, false);
214 /** Insert data into instruction or data translation cache.
216 * @param va Virtual page address.
217 * @param asid Address space identifier.
218 * @param entry The rest of TLB entry as required by TLB insertion format.
219 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
221 void tc_mapping_insert(__address va
, asid_t asid
, tlb_entry_t entry
, bool dtc
)
224 bool restore_rr
= false;
226 rr
.word
= rr_read(VA2VRN(va
));
227 if ((restore_rr
= (rr
.map
.rid
!= ASID2RID(asid
, VA2VRN(va
))))) {
229 * The selected region register does not contain required RID.
230 * Save the old content of the register and replace the RID.
235 rr0
.map
.rid
= ASID2RID(asid
, VA2VRN(va
));
236 rr_write(VA2VRN(va
), rr0
.word
);
243 "rsm %0;;\n" /* PSR_IC_MASK */
246 "mov cr.ifa=%1\n" /* va */
247 "mov cr.itir=%2;;\n" /* entry.word[1] */
248 "cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */
254 : "i" (PSR_IC_MASK
), "r" (va
), "r" (entry
.word
[1]), "r" (entry
.word
[0]), "r" (dtc
)
259 rr_write(VA2VRN(va
), rr
.word
);
265 /** Insert data into instruction translation register.
267 * @param va Virtual page address.
268 * @param asid Address space identifier.
269 * @param entry The rest of TLB entry as required by TLB insertion format.
270 * @param tr Translation register.
272 void itr_mapping_insert(__address va
, asid_t asid
, tlb_entry_t entry
, index_t tr
)
274 tr_mapping_insert(va
, asid
, entry
, false, tr
);
277 /** Insert data into data translation register.
279 * @param va Virtual page address.
280 * @param asid Address space identifier.
281 * @param entry The rest of TLB entry as required by TLB insertion format.
282 * @param tr Translation register.
284 void dtr_mapping_insert(__address va
, asid_t asid
, tlb_entry_t entry
, index_t tr
)
286 tr_mapping_insert(va
, asid
, entry
, true, tr
);
289 /** Insert data into instruction or data translation register.
291 * @param va Virtual page address.
292 * @param asid Address space identifier.
293 * @param entry The rest of TLB entry as required by TLB insertion format.
294 * @param dtc If true, insert into data translation register, use instruction translation register otherwise.
295 * @param tr Translation register.
297 void tr_mapping_insert(__address va
, asid_t asid
, tlb_entry_t entry
, bool dtr
, index_t tr
)
300 bool restore_rr
= false;
302 rr
.word
= rr_read(VA2VRN(va
));
303 if ((restore_rr
= (rr
.map
.rid
!= ASID2RID(asid
, VA2VRN(va
))))) {
305 * The selected region register does not contain required RID.
306 * Save the old content of the register and replace the RID.
311 rr0
.map
.rid
= ASID2RID(asid
, VA2VRN(va
));
312 rr_write(VA2VRN(va
), rr0
.word
);
319 "rsm %0;;\n" /* PSR_IC_MASK */
322 "mov cr.ifa=%1\n" /* va */
323 "mov cr.itir=%2;;\n" /* entry.word[1] */
324 "cmp.eq p6,p7=%5,r0;;\n" /* decide between itr and dtr */
325 "(p6) itr.i itr[%4]=%3;;\n"
326 "(p7) itr.d dtr[%4]=%3;;\n"
330 : "i" (PSR_IC_MASK
), "r" (va
), "r" (entry
.word
[1]), "r" (entry
.word
[0]), "r" (tr
), "r" (dtr
)
335 rr_write(VA2VRN(va
), rr
.word
);
341 /** Insert data into DTLB.
343 * @param page Virtual page address including VRN bits.
344 * @param frame Physical frame address.
345 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
346 * @param tr Translation register if dtr is true, ignored otherwise.
348 void dtlb_kernel_mapping_insert(__address page
, __address frame
, bool dtr
, index_t tr
)
355 entry
.p
= true; /* present */
356 entry
.ma
= MA_WRITEBACK
;
357 entry
.a
= true; /* already accessed */
358 entry
.d
= true; /* already dirty */
359 entry
.pl
= PL_KERNEL
;
360 entry
.ar
= AR_READ
| AR_WRITE
;
361 entry
.ppn
= frame
>> PPN_SHIFT
;
362 entry
.ps
= PAGE_WIDTH
;
365 dtr_mapping_insert(page
, ASID_KERNEL
, entry
, tr
);
367 dtc_mapping_insert(page
, ASID_KERNEL
, entry
);
370 /** Purge kernel entries from DTR.
372 * Purge DTR entries used by the kernel.
374 * @param page Virtual page address including VRN bits.
375 * @param width Width of the purge in bits.
377 void dtr_purge(__address page
, count_t width
)
379 __asm__
volatile ("ptr.d %0, %1\n" : : "r" (page
), "r" (width
<<2));
383 /** Copy content of PTE into data translation cache.
387 void dtc_pte_copy(pte_t
*t
)
395 entry
.ma
= t
->c
? MA_WRITEBACK
: MA_UNCACHEABLE
;
398 entry
.pl
= t
->k
? PL_KERNEL
: PL_USER
;
399 entry
.ar
= t
->w
? AR_WRITE
: AR_READ
;
400 entry
.ppn
= t
->frame
>> PPN_SHIFT
;
401 entry
.ps
= PAGE_WIDTH
;
403 dtc_mapping_insert(t
->page
, t
->as
->asid
, entry
);
405 vhpt_mapping_insert(t
->page
, t
->as
->asid
, entry
);
409 /** Copy content of PTE into instruction translation cache.
413 void itc_pte_copy(pte_t
*t
)
423 entry
.ma
= t
->c
? MA_WRITEBACK
: MA_UNCACHEABLE
;
425 entry
.pl
= t
->k
? PL_KERNEL
: PL_USER
;
426 entry
.ar
= t
->x
? (AR_EXECUTE
| AR_READ
) : AR_READ
;
427 entry
.ppn
= t
->frame
>> PPN_SHIFT
;
428 entry
.ps
= PAGE_WIDTH
;
430 itc_mapping_insert(t
->page
, t
->as
->asid
, entry
);
432 vhpt_mapping_insert(t
->page
, t
->as
->asid
, entry
);
436 /** Instruction TLB fault handler for faults with VHPT turned off.
438 * @param vector Interruption vector.
439 * @param istate Structure with saved interruption state.
441 void alternate_instruction_tlb_fault(__u64 vector
, istate_t
*istate
)
448 va
= istate
->cr_ifa
; /* faulting address */
449 rr
.word
= rr_read(VA2VRN(va
));
452 page_table_lock(AS
, true);
453 t
= page_mapping_find(AS
, va
);
456 * The mapping was found in software page hash table.
457 * Insert it into data translation cache.
460 page_table_unlock(AS
, true);
463 * Forward the page fault to address space page fault handler.
465 page_table_unlock(AS
, true);
466 if (as_page_fault(va
, PF_ACCESS_EXEC
, istate
) == AS_PF_FAULT
) {
467 fault_if_from_uspace(istate
,"Page fault at %P",va
);
468 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__
, va
, rid
, istate
->cr_iip
);
473 /** Data TLB fault handler for faults with VHPT turned off.
475 * @param vector Interruption vector.
476 * @param istate Structure with saved interruption state.
478 void alternate_data_tlb_fault(__u64 vector
, istate_t
*istate
)
485 va
= istate
->cr_ifa
; /* faulting address */
486 rr
.word
= rr_read(VA2VRN(va
));
488 if (RID2ASID(rid
) == ASID_KERNEL
) {
489 if (VA2VRN(va
) == VRN_KERNEL
) {
491 * Provide KA2PA(identity) mapping for faulting piece of
492 * kernel address space.
494 dtlb_kernel_mapping_insert(va
, KA2PA(va
), false, 0);
499 page_table_lock(AS
, true);
500 t
= page_mapping_find(AS
, va
);
503 * The mapping was found in software page hash table.
504 * Insert it into data translation cache.
507 page_table_unlock(AS
, true);
510 * Forward the page fault to address space page fault handler.
512 page_table_unlock(AS
, true);
513 if (as_page_fault(va
, PF_ACCESS_READ
, istate
) == AS_PF_FAULT
) {
514 fault_if_from_uspace(istate
,"Page fault at %P",va
);
515 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__
, va
, rid
, istate
->cr_iip
);
520 /** Data nested TLB fault handler.
522 * This fault should not occur.
524 * @param vector Interruption vector.
525 * @param istate Structure with saved interruption state.
527 void data_nested_tlb_fault(__u64 vector
, istate_t
*istate
)
529 panic("%s\n", __FUNCTION__
);
532 /** Data Dirty bit fault handler.
534 * @param vector Interruption vector.
535 * @param istate Structure with saved interruption state.
537 void data_dirty_bit_fault(__u64 vector
, istate_t
*istate
)
544 va
= istate
->cr_ifa
; /* faulting address */
545 rr
.word
= rr_read(VA2VRN(va
));
548 page_table_lock(AS
, true);
549 t
= page_mapping_find(AS
, va
);
551 if (t
&& t
->p
&& t
->w
) {
553 * Update the Dirty bit in page tables and reinsert
554 * the mapping into DTC.
559 if (as_page_fault(va
, PF_ACCESS_WRITE
, istate
) == AS_PF_FAULT
) {
560 fault_if_from_uspace(istate
,"Page fault at %P",va
);
561 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__
, va
, rid
, istate
->cr_iip
);
566 page_table_unlock(AS
, true);
569 /** Instruction access bit fault handler.
571 * @param vector Interruption vector.
572 * @param istate Structure with saved interruption state.
574 void instruction_access_bit_fault(__u64 vector
, istate_t
*istate
)
581 va
= istate
->cr_ifa
; /* faulting address */
582 rr
.word
= rr_read(VA2VRN(va
));
585 page_table_lock(AS
, true);
586 t
= page_mapping_find(AS
, va
);
588 if (t
&& t
->p
&& t
->x
) {
590 * Update the Accessed bit in page tables and reinsert
591 * the mapping into ITC.
596 if (as_page_fault(va
, PF_ACCESS_EXEC
, istate
) == AS_PF_FAULT
) {
597 fault_if_from_uspace(istate
,"Page fault at %P",va
);
598 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__
, va
, rid
, istate
->cr_iip
);
603 page_table_unlock(AS
, true);
606 /** Data access bit fault handler.
608 * @param vector Interruption vector.
609 * @param istate Structure with saved interruption state.
611 void data_access_bit_fault(__u64 vector
, istate_t
*istate
)
618 va
= istate
->cr_ifa
; /* faulting address */
619 rr
.word
= rr_read(VA2VRN(va
));
622 page_table_lock(AS
, true);
623 t
= page_mapping_find(AS
, va
);
627 * Update the Accessed bit in page tables and reinsert
628 * the mapping into DTC.
633 if (as_page_fault(va
, PF_ACCESS_READ
, istate
) == AS_PF_FAULT
) {
634 fault_if_from_uspace(istate
,"Page fault at %P",va
);
635 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__
, va
, rid
, istate
->cr_iip
);
640 page_table_unlock(AS
, true);
643 /** Page not present fault handler.
645 * @param vector Interruption vector.
646 * @param istate Structure with saved interruption state.
648 void page_not_present(__u64 vector
, istate_t
*istate
)
655 va
= istate
->cr_ifa
; /* faulting address */
656 rr
.word
= rr_read(VA2VRN(va
));
659 page_table_lock(AS
, true);
660 t
= page_mapping_find(AS
, va
);
665 * If the Present bit is set in page hash table, just copy it
666 * and update ITC/DTC.
672 page_table_unlock(AS
, true);
674 page_table_unlock(AS
, true);
675 if (as_page_fault(va
, PF_ACCESS_READ
, istate
) == AS_PF_FAULT
) {
676 fault_if_from_uspace(istate
,"Page fault at %P",va
);
677 panic("%s: va=%p, rid=%d\n", __FUNCTION__
, va
, rid
);