/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#include <asm/pgtable.h>
#include <asm/cache.h>
/* Internal use D/I cache flushing routines... */
/* XXX: these functions must not access memory between f[di]ce instructions. */
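/*
 * For reference, a minimal sketch of the single-entry flush wrappers
 * assumed to come from <asm/cache.h> (illustrative only; the fice form
 * takes its space id from %sr1):
 *
 *	#define fdce(addr) asm volatile("fdce 0(%0)" : : "r" (addr))
 *	#define fice(addr) asm volatile("fice 0(%%sr1,%0)" : : "r" (addr))
 */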
static inline void __flush_dcache_range(unsigned long start, unsigned long size)
{
#if 0
        register unsigned long count = (size / L1_CACHE_BYTES);
        register unsigned long loop = cache_info.dc_loop;
        register unsigned long i, j;

        if (size > 64 * 1024) {
                /* Just punt and clear the whole damn thing */
                flush_data_cache();
                return;
        }

        /* Note the <=: flush one extra line to cover a size that is not
         * an exact multiple of the cache line size. */
        for (i = 0; i <= count; i++, start += L1_CACHE_BYTES)
                for (j = 0; j < loop; j++)
                        fdce(start);    /* flush one data cache entry */
#else
        flush_data_cache();
#endif
}
static inline void __flush_icache_range(unsigned long start, unsigned long size)
{
#if 0
        register unsigned long count = (size / L1_CACHE_BYTES);
        register unsigned long loop = cache_info.ic_loop;
        register unsigned long i, j;

        if (size > 64 * 1024) {
                /* Just punt and clear the whole damn thing */
                flush_instruction_cache();
                return;
        }

        for (i = 0; i <= count; i++, start += L1_CACHE_BYTES)
                for (j = 0; j < loop; j++)
                        fice(start);    /* flush one instruction cache entry */
#else
        flush_instruction_cache();
#endif
}
static inline void
flush_kernel_dcache_range(unsigned long start, unsigned long size)
{
        register unsigned long end = start + size;
        register unsigned long i;

        start &= ~(L1_CACHE_BYTES - 1); /* align down to a cache line */
        for (i = start; i < end; i += L1_CACHE_BYTES) {
                fdc(i);         /* flush data cache line at kernel address i */
        }
        asm volatile("sync" : : );      /* wait for the flushes to complete */
        asm volatile("syncdma" : : );   /* and order them against DMA */
}
extern void __flush_page_to_ram(unsigned long address);
#define flush_cache_all() flush_all_caches()
#define flush_cache_mm(foo) flush_all_caches()
#if 0
/* This is how I think the cache flushing should be done -- mrw */
extern inline void flush_cache_mm(struct mm_struct *mm) {
        if (mm == current->mm) {
                flush_user_dcache_range(mm->start_data, mm->end_data);
                flush_user_icache_range(mm->start_code, mm->end_code);
        } else {
                flush_other_dcache_range(mm->context, mm->start_data, mm->end_data);
                flush_other_icache_range(mm->context, mm->start_code, mm->end_code);
        }
}
#endif
#define flush_cache_range(mm, start, end) do { \
        __flush_dcache_range(start, (unsigned long)end - (unsigned long)start); \
        __flush_icache_range(start, (unsigned long)end - (unsigned long)start); \
} while (0)
#define flush_cache_page(vma, vmaddr) do { \
        __flush_dcache_range(vmaddr, PAGE_SIZE); \
        __flush_icache_range(vmaddr, PAGE_SIZE); \
} while (0)
#define flush_page_to_ram(page) \
        __flush_page_to_ram((unsigned long)page_address(page))
#define flush_icache_range(start, end) \
        __flush_icache_range(start, end - start)
#define flush_icache_page(vma, page) \
        __flush_icache_range((unsigned long)page_address(page), PAGE_SIZE)

#define flush_dcache_page(page) \
        __flush_dcache_range((unsigned long)page_address(page), PAGE_SIZE)
/* TLB flushing routines.... */

extern void flush_data_tlb(void);
extern void flush_instruction_tlb(void);
#define flush_tlb() do { \
        flush_data_tlb(); \
        flush_instruction_tlb(); \
} while (0)

#define flush_tlb_all() flush_tlb() /* XXX p[id]tlb */
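/*
 * Likewise, the TLB range flushers below are assumed to build on
 * single-entry purge wrappers along these lines (illustrative only;
 * both take their space id from %sr1):
 *
 *	#define pitlbe(addr) asm volatile("pitlbe 0(%%sr1,%0)" : : "r" (addr))
 *	#define pdtlbe(addr) asm volatile("pdtlbe 0(%%sr1,%0)" : : "r" (addr))
 */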
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        /* Nothing to do: this hook only matters on architectures that
         * keep their page tables in virtually-mapped memory. */
}
static inline void flush_instruction_tlb_range(unsigned long start,
        unsigned long size)
{
#if 0
        register unsigned long count = (size / PAGE_SIZE);
        register unsigned long loop = cache_info.it_loop;
        register unsigned long i, j;

        for (i = 0; i <= count; i++, start += PAGE_SIZE)
                for (j = 0; j < loop; j++)
                        pitlbe(start);  /* purge one instruction TLB entry */
#else
        flush_instruction_tlb();
#endif
}
static inline void flush_data_tlb_range(unsigned long start,
        unsigned long size)
{
#if 0
        register unsigned long count = (size / PAGE_SIZE);
        register unsigned long loop = cache_info.dt_loop;
        register unsigned long i, j;

        for (i = 0; i <= count; i++, start += PAGE_SIZE)
                for (j = 0; j < loop; j++)
                        pdtlbe(start);  /* purge one data TLB entry */
#else
        flush_data_tlb();
#endif
}
static inline void __flush_tlb_range(unsigned long space, unsigned long start,
        unsigned long size)
{
        unsigned long old_sr1;

        /* The purge instructions take their space id from %sr1, so point
         * sr1 at the target space for the duration of the purge, then
         * restore it. */
        old_sr1 = mfsp(1);
        mtsp(space, 1);

        flush_data_tlb_range(start, size);
        flush_instruction_tlb_range(start, size);

        mtsp(old_sr1, 1);
}
extern void __flush_tlb_space(unsigned long space);
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_space(mm->context);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        __flush_tlb_range(vma->vm_mm->context, addr, PAGE_SIZE);
}
static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        __flush_tlb_range(mm->context, start, end - start);
}
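/*
 * Usage sketch (illustrative, following the generic mm code of this
 * era): when a range of user mappings is torn down, the caches are
 * flushed while the translations still exist, the pages are unmapped,
 * and only then is the TLB purged:
 *
 *	flush_cache_range(mm, start, end);
 *	... clear the page table entries ...
 *	flush_tlb_range(mm, start, end);
 */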
/*
 * NOTE: Many of the below macros use PT_NLEVELS because
 * it is convenient that PT_NLEVELS == LOG2(pte size in bytes),
 * i.e. we use 3 level page tables when we use 8 byte pte's
 * (for 64 bit) and 2 level page tables when we use 4 byte pte's
 * (for 32 bit).
 */
#ifdef __LP64__
#define PT_NLEVELS 3
#define PT_INITIAL 4 /* Number of initial page tables */
#else
#define PT_NLEVELS 2
#define PT_INITIAL 2 /* Number of initial page tables */
#endif
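/*
 * Worked example of the LOG2 convenience above, for 4kB pages
 * (PAGE_SHIFT == 12): a one-page table holds
 * PAGE_SIZE / sizeof(pte_t) == 1 << (PAGE_SHIFT - LOG2(sizeof(pte_t)))
 * entries. With 8-byte pte's, LOG2(8) == 3 == PT_NLEVELS, giving
 * 1 << (12 - 3) == 512 entries per table; with 4-byte pte's,
 * LOG2(4) == 2 == PT_NLEVELS, giving 1 << (12 - 2) == 1024. That is
 * why (PAGE_SHIFT - PT_NLEVELS) appears in every PTRS_PER_* macro
 * below.
 */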
/* Definitions for 1st level */

#define PGDIR_SHIFT (PAGE_SHIFT + (PT_NLEVELS - 1)*(PAGE_SHIFT - PT_NLEVELS))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT - PT_NLEVELS))
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
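/*
 * With 4kB pages these work out to: 64-bit (PT_NLEVELS == 3):
 * PGDIR_SHIFT == 12 + 2*9 == 30, so a pgd entry maps 1GB and the
 * 512-entry pgd covers 512GB. 32-bit (PT_NLEVELS == 2): PGDIR_SHIFT ==
 * 12 + 1*10 == 22, so a pgd entry maps 4MB and the 1024-entry pgd
 * covers the full 4GB.
 */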
/* Definitions for 2nd level */

#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PT_NLEVELS))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#if PT_NLEVELS == 3
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT - PT_NLEVELS))
#else
#define PTRS_PER_PMD 1
#endif
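/*
 * Consistency check for the 64-bit case: PMD_SHIFT == 12 + 9 == 21,
 * so a pmd entry maps 2MB, and 512 pmd entries * 2MB == 1GB ==
 * PGDIR_SIZE, i.e. each pmd page exactly fills one pgd slot.
 */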
/* Definitions for 3rd level */

#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - PT_NLEVELS))
/* No pgd quicklists yet: the "fast" allocators fall straight through
 * to the slow ones. */
#define get_pgd_fast get_pgd_slow
#define free_pgd_fast free_pgd_slow
extern __inline__ pgd_t *get_pgd_slow(void)
{
        extern unsigned long gateway_pgd_offset;
        extern unsigned long gateway_pgd_entry;
        pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (ret) {
                memset (ret, 0, PTRS_PER_PGD * sizeof(pgd_t));

                /* Install HP-UX and Linux gateway page translations */

                pgd_val(*(ret + gateway_pgd_offset)) = gateway_pgd_entry;
        }
        return ret;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}
#if PT_NLEVELS == 3

/* Three Level Page Table Support for pmd's */
extern __inline__ pmd_t *get_pmd_fast(void)
{
        return NULL; /* la la */
}
extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

/* The macro below overrides the (empty) function above for all later
 * uses, so freed pmd pages really are returned to the page allocator. */
#define free_pmd_fast free_pmd_slow
extern __inline__ pmd_t *get_pmd_slow(void)
{
        pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

        if (pmd)
                memset(pmd, 0, PTRS_PER_PMD * sizeof(pmd_t));
        return pmd;
}
extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}
extern void __bad_pgd(pgd_t *pgd);
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd))
                goto getnew;
        if (pgd_bad(*pgd))
                goto fix;
        return (pmd_t *) pgd_page(*pgd) + address;
getnew:
{
        pmd_t *page = get_pmd_fast();

        if (!page)
                page = get_pmd_slow();
        if (!page)
                return NULL;
        if (pgd_none(*pgd)) {
                pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)page);
                return page + address;
        }
        /* Lost the race: someone else installed a pmd here first. */
        free_pmd_fast(page);
        return (pmd_t *) pgd_page(*pgd) + address;
}
fix:
        __bad_pgd(pgd);
        return NULL;
}
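/*
 * Note on the pattern above: the second pgd_none() test after the
 * allocation is the usual SMP idiom of this era. Another CPU may have
 * installed a pmd for the same slot between the first check and the
 * allocation; if so, we discard our page and use the winner's table.
 */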
#else

/* Two Level Page Table Support for pmd's */
extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
        /* The pmd level is folded into the pgd: a pgd entry is the pmd. */
        return (pmd_t *) pgd;
}
extern inline void free_pmd_fast(pmd_t * pmd)
{
        /* Nothing to free: the pmd is really a pgd entry. */
}

#endif
extern __inline__ pte_t *get_pte_fast(void)
{
        return NULL; /* la la */
}
extern __inline__ void free_pte_fast(pte_t *pte)
{
}
/* As with free_pmd_fast above, later uses go to the slow path. */
#define free_pte_fast free_pte_slow
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}
#define pmd_alloc_kernel pmd_alloc
#define pte_alloc_kernel pte_alloc

#define pte_free(pte) free_pte_fast(pte)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc() get_pgd_fast()
extern void __bad_pmd(pmd_t *pmd);
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd))
                goto getnew;
        if (pmd_bad(*pmd))
                goto fix;
        return (pte_t *) pmd_page(*pmd) + address;
getnew:
{
        pte_t *page = get_pte_fast();

        if (!page)
                return get_pte_slow(pmd, address);
        pmd_val(*pmd) = _PAGE_TABLE + __pa((unsigned long)page);
        return page + address;
}
fix:
        __bad_pmd(pmd);
        return NULL;
}
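/*
 * Usage sketch (illustrative, mirroring the generic mm code that calls
 * these): walking down to, and allocating, the pte for an address:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_alloc(pgd, addr);
 *	pte_t *pte = pmd ? pte_alloc(pmd, addr) : NULL;
 *
 * Both allocators return a pointer to the entry for addr, not to the
 * base of the table.
 */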
extern int do_check_pgt_cache(int, int);