/*
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 * Chris Zankel <chris@zankel.net>
 */
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
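
/*
 * The two helpers below walk every way and index of the auto-refill ITLB
 * and DTLB and invalidate each entry individually.  The entry argument,
 * w + (i << PAGE_SHIFT), packs the way number into the low bits and the
 * index into the VPN field, which appears to be the encoding the
 * invalidate_*_entry_no_isync() primitives expect.  A single isync at the
 * end is sufficient, which is why the _no_isync variants are used in the
 * loops.
 */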
static inline void __flush_itlb_all (void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}
static inline void __flush_dtlb_all (void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}
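
/*
 * flush_tlb_all() drops every auto-refill entry in both TLBs.  Only the
 * auto-refill ways are walked above, so any wired/static ways are left
 * untouched by this path.
 */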
void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm is someone else's user
 * mapping, we invalidate the context so that, when that user mapping is
 * swapped in, a new context will be assigned to it.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		__get_new_mmu_context(mm);
		__load_mmu_context(mm);
		local_irq_restore(flags);
	} else {
		mm->context = NO_CONTEXT;
	}
}
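
/*
 * Why a fresh ASID is enough: the MMU only hits TLB entries whose ASID
 * field matches one of the ring ASIDs currently held in the RASID
 * register.  Once the mm has been given a new ASID, entries tagged with
 * the old one can never match again and are eventually evicted by the
 * auto-refill replacement, so no per-entry invalidation is needed here.
 */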
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
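
/*
 * _TLB_ENTRIES is the capacity of the larger of the two auto-refill TLBs.
 * flush_tlb_range() uses it as a cut-off: once a range covers more pages
 * than the TLB could possibly hold, per-page invalidation cannot beat a
 * full flush, so it falls back to flush_tlb_all().
 */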
void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
	       (unsigned long)mm->context, start, end);
#endif
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		/* Make this mm's ASID visible to the TLB instructions. */
		set_rasid_register(ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC) {
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		} else {
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		}
		set_rasid_register(oldpid);
	} else {
		flush_tlb_all();
	}
	local_irq_restore(flags);
}
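
/*
 * flush_tlb_page() uses the same RASID trick as flush_tlb_range(): switch
 * the RASID register to the target mm's ASID so the probe-and-invalidate
 * primitives see that address space, drop the page from the ITLB (for
 * executable mappings) and the DTLB, then restore the previous RASID.
 */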
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);
	local_irq_restore(flags);
}