#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H

#ifndef CONFIG_SUN3

#include <asm/current.h>

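/*
 * 680x0 with a paged MMU: translations are cached in the on-chip ATC.
 * The '040/'060 only accept the "pflush (%an)" form (hence the
 * ".chip 68040" assembler directives below), while the '020 (with its
 * 68851) and the '030 use the older "pflush fc,mask" form; mask #4
 * compares only the supervisor bit of the function code, so #4,#4
 * matches supervisor entries and #0,#4 matches user entries.
 */
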
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_040_OR_060) {
		mm_segment_t old_fs = get_fs();

		/* pflush on the '040/'060 takes the function code from
		   %dfc, so switch to the supervisor space while flushing. */
		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}

/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_040_OR_060)
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4");
}

static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

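/*
 * Note that flush_tlb() only drops user-space entries; kernel mappings
 * are invalidated explicitly via flush_tlb_kernel_page() and
 * flush_tlb_all() below.
 */
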
/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_040_OR_060)
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflusha");
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		mm_segment_t old_fs = get_fs();

		set_fs(USER_DS);
		__flush_tlb_one(addr);
		set_fs(old_fs);
	}
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

#else	/* CONFIG_SUN3 */

extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];

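/*
 * The Sun-3 MMU has no flush instruction: translations come from a
 * per-context segment map, where each segment covers SUN3_PMEG_SIZE
 * bytes and points at a page-map-entry group (PMEG).  "Flushing" a
 * mapping therefore means writing SUN3_INVALID_PMEG into the relevant
 * segment-map slots and clearing the kernel's pmeg_alloc/pmeg_ctx/
 * pmeg_vaddr bookkeeping for the PMEGs that backed them.
 */
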
/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
	unsigned long addr;
	unsigned char ctx, oldctx;

	oldctx = sun3_get_context();
	for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for (ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
		}
	}
	sun3_put_context(oldctx);

	/* erase all of the userspace pmeg maps, we've clobbered them
	   all anyway */
	for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if (pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_ctx[addr] = 0;
			pmeg_vaddr[addr] = 0;
		}
	}
}

/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned char oldctx;
	unsigned char seg;
	unsigned long i;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if (seg == SUN3_INVALID_PMEG)
			continue;

		sun3_put_segmap(i, SUN3_INVALID_PMEG);
		pmeg_alloc[seg] = 0;
		pmeg_ctx[seg] = 0;
		pmeg_vaddr[seg] = 0;
	}

	sun3_put_context(oldctx);
}

/* Flush a single TLB page.  In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	addr &= ~SUN3_PMEG_MASK;
	if ((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG) {
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);
}

/* Flush a range of pages from TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while (start < end) {
		if ((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
			goto next;
		/* only drop the bookkeeping if the PMEG really belongs
		   to this context */
		if (pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
		start += SUN3_PMEG_SIZE;
	}
	sun3_put_context(oldctx);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

#endif	/* CONFIG_SUN3 */

#endif /* _M68K_TLBFLUSH_H */