#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__
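/*
 * Illustrative usage only (not part of the original header): after the
 * kernel modifies page table entries for a range of a VMA, it is
 * expected to invalidate the matching TLB entries, roughly:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, new_pte);
 *	...
 *	flush_tlb_range(vma, start, end);
 *
 * The names vma, addr, ptep, new_pte, start and end are placeholders.
 * On 64-bit hash MMU CPUs these hooks may be no-ops, because flushing
 * is driven by the hash PTE update paths declared further down.
 */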
#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

extern void _tlbie(unsigned long address, unsigned int pid);
#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
extern void _tlbia(void);
#endif
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbia();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
}
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbia();
}
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbia();
}
#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma,
				  unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#else
/*
 * TLB flushing for 64-bit hash MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR	192
struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
/*
 * Called from the PTE update paths when a Linux PTE that may have a
 * hash PTE behind it changes: the invalidation is queued in the
 * per-CPU batch, or performed immediately outside lazy MMU mode.
 */
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}
static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}
#define arch_flush_lazy_mmu_mode()	do {} while (0)
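/*
 * Illustrative sketch only (not part of the original header): generic
 * page table code brackets a run of PTE updates with the lazy MMU
 * hooks so the hash invalidations queued by hpte_need_flush() go out
 * once per batch instead of once per PTE, roughly:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		ptep_get_and_clear(mm, addr, ptep);
 *	arch_leave_lazy_mmu_mode();
 *
 * arch_leave_lazy_mmu_mode() flushes any pending batch. The names mm,
 * addr, end and ptep above are placeholders.
 */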
extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);
/*
 * The hash MMU 64-bit TLB is kept consistent by the hash table flush
 * paths above, so the generic flush_tlb_* hooks are no-ops here.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);

#endif
/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
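/*
 * Illustrative sketch only (not part of the original header): the
 * generic fault path calls this right after installing the new PTE,
 * roughly:
 *
 *	set_pte_at(mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, entry);
 *
 * so that on hash MMU machines the HPTE can be preloaded before the
 * first access would otherwise fault again. The names mm, address,
 * ptep, entry and vma above are placeholders.
 */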
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */