/*
 * include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define tlb_fast_mode(tlb) 1
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;	/* set to ~0U means fast mode */
	unsigned int		max;	/* nr < max */
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned int		fullmm;	/* non-zero means full mm flush */
#ifdef HAVE_ARCH_MMU_GATHER
	struct arch_mmu_gather	arch;
#endif
	struct page		**pages;
	struct page		*local[8];
};

static inline void __tlb_alloc_pages(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_ATOMIC, 0);

	/* Fall back to the on-stack local[] array if the allocation fails */
	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

/* tlb_gather_mmu
 *	Initialize an mmu_gather structure for page-table tear-down from @mm.
 *	@full_mm_flush is non-zero when the whole address space is being torn down.
 */
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;

	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;

	if (num_online_cpus() > 1) {
		tlb->nr = 0;
		__tlb_alloc_pages(tlb);
	} else /* Use fast mode if only one CPU is online */
		tlb->nr = ~0U;

	tlb->fullmm = full_mm_flush;

#ifdef HAVE_ARCH_MMU_GATHER
	tlb->arch = ARCH_MMU_GATHER_INIT;
#endif
}

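/*
 * Illustrative only: an architecture that needs private per-gather state
 * opts in by defining HAVE_ARCH_MMU_GATHER and providing struct
 * arch_mmu_gather and ARCH_MMU_GATHER_INIT in its <asm/tlb.h>. Only those
 * three identifiers are assumed by this header; the member below is a
 * hypothetical example, not any real architecture's definition:
 *
 *	#define HAVE_ARCH_MMU_GATHER 1
 *
 *	struct arch_mmu_gather {
 *		int	example_state;
 *	};
 *
 *	#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .example_state = 0, }
 */
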
static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
		/* Retry the allocation if we are still on the local array */
		if (tlb->pages == tlb->local)
			__tlb_alloc_pages(tlb);
	}
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

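/*
 * Illustrative call sequence (a sketch of how a caller such as mm/memory.c
 * drives this API; the loop below is pseudocode and the variable names are
 * made up):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	for each pte unmapped in [start, end):
 *		tlb_remove_tlb_entry(&tlb, ptep, address);
 *		tlb_remove_page(&tlb, page);	(both defined below)
 *	tlb_finish_mmu(&tlb, start, end);
 */
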
/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 *	handling the additional races in SMP caused by other CPUs caching valid
 *	mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= tlb->max)
		tlb_flush_mmu(tlb, 0, 0);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

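/*
 * The architecture supplies __tlb_remove_tlb_entry() in its <asm/tlb.h>.
 * On architectures that do not track individual addresses and simply flush
 * the whole range (or mm) from tlb_flush(), it can be an empty macro, for
 * example:
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 */
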
#define pte_free_tlb(tlb, ptep)				\
	do {						\
		tlb->need_flush = 1;			\
		__pte_free_tlb(tlb, ptep);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp)				\
	do {						\
		tlb->need_flush = 1;			\
		__pud_free_tlb(tlb, pudp);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp)				\
	do {						\
		tlb->need_flush = 1;			\
		__pmd_free_tlb(tlb, pmdp);		\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */