added 2.6.29.6 aldebaran kernel
[nao-ulib.git] / kernel/2.6.29.6-aldebaran-rt/arch/powerpc/mm/tlb_hash64.c
blob 774adfd9bda3499b5c22b1949da03a5be7ec03f9
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
#include <asm/machdep.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
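
/*
 * Editor's note (not part of the original file): struct ppc64_tlb_batch is
 * declared in asm/tlbflush.h. In this kernel generation it holds up to
 * PPC64_TLB_BATCH_NR queued (vaddr, real_pte) pairs together with the mm,
 * page size and segment size they belong to, plus an 'active' flag that the
 * lazy-MMU hooks set while a batch is being collected. The per-CPU instance
 * defined above is the one hpte_need_flush() and __flush_tlb_pending() use.
 */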
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
        struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid, vaddr;
        unsigned int psize;
        int ssize;
        real_pte_t rpte;
        int i;

        i = batch->index;

        /* We mask the address for the base page size. Huge pages will
         * have applied their own masking already
         */
        addr &= PAGE_MASK;
        /* Get page size (maybe move back to caller).
         *
         * NOTE: when using special 64K mappings in 4K environment like
         * for SPEs, we obtain the page size from the slice, which thus
         * must still exist (and thus the VMA not reused) at the time
         * of this call
         */
        if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
                psize = get_slice_psize(mm, addr);
#else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
        } else
                psize = pte_pagesize_index(mm, addr, pte);
        /* Build full vaddr */
        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_vsid(mm->context.id, addr, ssize);
                WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }
        vaddr = hpt_va(addr, vsid, ssize);
        rpte = __real_pte(__pte(pte), ptep);
        /*
         * Check if we have an active batch on this CPU. If not, just
         * flush now and return. For now, we do global invalidates
         * in that case, might be worth testing the mm cpu mask though
         * and decide to use local invalidates instead...
         */
        if (!batch->active) {
                flush_hash_page(vaddr, rpte, psize, ssize, 0);
                put_cpu_var(ppc64_tlb_batch);
                return;
        }
        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte). If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         *
         * We also need to ensure only one page size is present in a given
         * batch
         */
        if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                       batch->ssize != ssize)) {
                __flush_tlb_pending(batch);
                i = 0;
        }
        if (i == 0) {
                batch->mm = mm;
                batch->psize = psize;
                batch->ssize = ssize;
        }
        batch->pte[i] = rpte;
        batch->vaddr[i] = vaddr;
        batch->index = ++i;
#ifdef CONFIG_PREEMPT_RT
        /*
         * Since flushing tlb needs expensive hypervisor call(s) on celleb,
         * always flush it on RT to reduce scheduling latency.
         */
        if (machine_is(celleb)) {
                __flush_tlb_pending(batch);
                put_cpu_var(ppc64_tlb_batch);
                return;
        }
#endif /* CONFIG_PREEMPT_RT */

        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
        put_cpu_var(ppc64_tlb_batch);
}
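
/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * batch above is normally driven by the lazy-MMU hooks in asm/tlbflush.h,
 * which mark it active and drain it again. A typical sequence under the
 * PTE lock looks roughly like:
 *
 *      arch_enter_lazy_mmu_mode();                     // batch->active = 1
 *      hpte_need_flush(mm, addr, ptep, pte, 0);        // queued, or flushed
 *                                                      // early if the batch
 *                                                      // fills up
 *      ...more PTE updates...
 *      arch_leave_lazy_mmu_mode();                     // __flush_tlb_pending()
 *                                                      // on whatever is left
 *
 * The exact call sites live in the pgtable/hash code; this comment only
 * summarizes the intended usage.
 */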
/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        cpumask_t tmp;
        int i, local = 0;

        i = batch->index;
        tmp = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
                local = 1;
        if (i == 1)
                flush_hash_page(batch->vaddr[0], batch->pte[0],
                                batch->psize, batch->ssize, local);
        else
                flush_hash_range(i, local);
        batch->index = 0;
}
/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm    : mm_struct of the target address space (generally init_mm)
 * @start : starting address
 * @end   : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                              unsigned long end)
{
        unsigned long flags;

        start = _ALIGN_DOWN(start, PAGE_SIZE);
        end = _ALIGN_UP(end, PAGE_SIZE);

        BUG_ON(!mm->pgd);

        /* Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
         * to being hashed). This is not the most performance oriented
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        for (; start < end; start += PAGE_SIZE) {
                pte_t *ptep = find_linux_pte(mm->pgd, start);
                unsigned long pte;

                if (ptep == NULL)
                        continue;
                pte = pte_val(*ptep);
                if (!(pte & _PAGE_HASHPTE))
                        continue;
                hpte_need_flush(mm, start, ptep, pte, 0);
        }
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */
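
/*
 * Editor's note (illustrative example, not part of the original file): per
 * the kernel-doc above, the expected caller is IO hotplug code removing a
 * bridge's IO mapping while leaving the linux PTEs in place, along the
 * lines of:
 *
 *      __flush_hash_table_range(&init_mm, io_vaddr, io_vaddr + io_size);
 *
 * io_vaddr and io_size are hypothetical names standing in for the virtual
 * range that was used to map the removed bridge's IO space.
 */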