[mikesnafu-overlay.git] / arch/powerpc/mm/tlb_64.c
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
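/* Per-CPU batch of pending hash-PTE invalidations: filled by
 * hpte_need_flush() and drained by __flush_tlb_pending() below.
 */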
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;
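/* A page-sized batch of page-table pages awaiting an RCU-deferred free;
 * tables[] is a flexible array that fills the rest of the page (see
 * PTE_FREELIST_SIZE below).
 */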
struct pte_freelist_batch
{
        struct rcu_head rcu;
        unsigned int    index;
        pgtable_free_t  tables[0];
};
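/* How many pgtable_free_t entries fit in the rest of the page after the
 * pte_freelist_batch header.
 */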
#define PTE_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
          / sizeof(pgtable_free_t))
static void pte_free_smp_sync(void *arg)
{
        /* Do nothing, just ensure we sync with all CPUs */
}
/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
        pte_freelist_forced_free++;

        smp_call_function(pte_free_smp_sync, NULL, 0, 1);

        pgtable_free(pgf);
}
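/* RCU callback: free every page table collected in the batch, then free
 * the page holding the batch itself.
 */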
static void pte_free_rcu_callback(struct rcu_head *head)
{
        struct pte_freelist_batch *batch =
                container_of(head, struct pte_freelist_batch, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                pgtable_free(batch->tables[i]);

        free_page((unsigned long)batch);
}
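/* Hand a batch to RCU: the tables it holds are freed by
 * pte_free_rcu_callback() only after a grace period, i.e. once any other
 * CPU that might still be walking them has finished.
 */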
static void pte_free_submit(struct pte_freelist_batch *batch)
{
        INIT_RCU_HEAD(&batch->rcu);
        call_rcu(&batch->rcu, pte_free_rcu_callback);
}
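/* Free a page-table page being torn down by an mmu_gather. If this mm
 * cannot be in use on any other CPU the page is freed immediately;
 * otherwise it is queued on the per-CPU RCU batch, falling back to the
 * synchronous pgtable_free_now() if no batch page can be allocated.
 */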
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
        /* This is safe since tlb_gather_mmu has disabled preemption */
        cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
                pgtable_free(pgf);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
                if (*batchp == NULL) {
                        pgtable_free_now(pgf);
                        return;
                }
                (*batchp)->index = 0;
        }
        (*batchp)->tables[(*batchp)->index++] = pgf;
        if ((*batchp)->index == PTE_FREELIST_SIZE) {
                pte_free_submit(*batchp);
                *batchp = NULL;
        }
}
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid, vaddr;
        unsigned int psize;
        int ssize;
        real_pte_t rpte;
        int i;

        i = batch->index;

        /* We mask the address for the base page size. Huge pages will
         * have applied their own masking already
         */
        addr &= PAGE_MASK;

        /* Get page size (maybe move back to caller).
         *
         * NOTE: when using special 64K mappings in 4K environment like
         * for SPEs, we obtain the page size from the slice, which thus
         * must still exist (and thus the VMA not reused) at the time
         * of this call
         */
        if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
                psize = mmu_huge_psize;
#else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
        } else
                psize = pte_pagesize_index(mm, addr, pte);
        /* Build full vaddr */
        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_vsid(mm->context.id, addr, ssize);
                WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }
        vaddr = hpt_va(addr, vsid, ssize);
        rpte = __real_pte(__pte(pte), ptep);
        /*
         * Check if we have an active batch on this CPU. If not, just
         * flush now and return. For now, we do global invalidates
         * in that case; it might be worth testing the mm cpu mask
         * and deciding to use local invalidates instead...
         */
        if (!batch->active) {
                flush_hash_page(vaddr, rpte, psize, ssize, 0);
                return;
        }
        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte). If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         *
         * We also need to ensure only one page size is present in a given
         * batch
         */
        if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                       batch->ssize != ssize)) {
                __flush_tlb_pending(batch);
                i = 0;
        }
        if (i == 0) {
                batch->mm = mm;
                batch->psize = psize;
                batch->ssize = ssize;
        }
        batch->pte[i] = rpte;
        batch->vaddr[i] = vaddr;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
}
/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        cpumask_t tmp;
        int i, local = 0;

        i = batch->index;
        tmp = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
                local = 1;
        if (i == 1)
                flush_hash_page(batch->vaddr[0], batch->pte[0],
                                batch->psize, batch->ssize, local);
        else
                flush_hash_range(i, local);
        batch->index = 0;
}
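/* Flush out any partially filled freelist batch left on this CPU; like
 * pgtable_free_tlb(), this relies on the caller having disabled
 * preemption via tlb_gather_mmu.
 */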
void pte_free_finish(void)
{
        /* This is safe since tlb_gather_mmu has disabled preemption */
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (*batchp == NULL)
                return;
        pte_free_submit(*batchp);
        *batchp = NULL;
}
/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm          : mm_struct of the target address space (generally init_mm)
 * @start       : starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                              unsigned long end)
{
        unsigned long flags;

        start = _ALIGN_DOWN(start, PAGE_SIZE);
        end = _ALIGN_UP(end, PAGE_SIZE);

        BUG_ON(!mm->pgd);

        /* Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
         * to being hashed). This is not the most performance oriented
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        for (; start < end; start += PAGE_SIZE) {
                pte_t *ptep = find_linux_pte(mm->pgd, start);
                unsigned long pte;

                if (ptep == NULL)
                        continue;
                pte = pte_val(*ptep);
                if (!(pte & _PAGE_HASHPTE))
                        continue;
                hpte_need_flush(mm, start, ptep, pte, 0);
        }
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */