#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned long		start;		/* first address in the pending flush range */
	unsigned long		end;		/* one past the last address to flush */
	unsigned int		fullmm;		/* non-zero means full mm flush */
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
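
/*
 * For illustration: a minimal sketch of the matching definition an arch
 * supplies in one of its .c files (assuming the usual DEFINE_PER_CPU
 * pairing; exactly which file it lives in varies by architecture):
 *
 *	#include <linux/percpu.h>
 *
 *	DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 */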

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	/* Grow the pending flush range to cover this entry's page. */
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}
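
/*
 * Worked example (hypothetical addresses): after entries at 0x1000 and
 * 0x3000 have been removed, tlb->start is 0x1000 and tlb->end is 0x4000,
 * so a single ranged flush in tlb_flush_mmu() covers both pages at once.
 */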

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	/* Start with an empty range: start above every address, end below. */
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

/* tlb_gather_mmu
 *	Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);

	return tlb;
}
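
/*
 * A rough sketch of how the generic mm code drives this API when tearing
 * down a range of user mappings; illustrative only, with the page table
 * walk elided (the loop body below is hypothetical, not a real helper):
 *
 *	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		... pte = ptep_get_and_clear(mm, addr, ptep); ...
 *		tlb_remove_tlb_entry(tlb, ptep, addr);
 *		tlb_remove_page(tlb, pte_page(pte));
 *	}
 *
 *	tlb_finish_mmu(tlb, start, end);
 */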

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/* Skip the flush entirely if no ptes were actually unmapped. */
	if (!tlb->need_flush)
		return;

	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	init_tlb_gather(tlb);
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
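
/*
 * Note: the do { } while (0) wrapper is the standard idiom for making a
 * multi-statement macro behave like a single statement, so that e.g.
 *
 *	if (cond)
 *		tlb_remove_tlb_entry(tlb, ptep, address);
 *	else
 *		...
 *
 * expands without breaking the if/else pairing.
 */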

#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)

#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)

#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)

#define tlb_migrate_finish(mm) do {} while (0)