[PATCH] ppc64: support 64k pages
[linux-2.6/sactl.git] arch/powerpc/mm/tlb_64.c
blob 53e31b834ace00f75dbc686321d9ddbeeab8eff7

/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;
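
/* A pte_freelist_batch collects page table pages whose freeing has to be
 * deferred: the batch lives in a single page (a header followed by an
 * array of pgtable_free_t entries) and is handed to RCU, so the page
 * tables are only freed once every CPU has passed through a grace period
 * and can no longer be walking them.
 */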
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
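/* Worked example (the sizes here are illustrative assumptions, the real
 * values depend on the configuration): with a 4K PAGE_SIZE, a 16-byte
 * rcu_head and an 8-byte pgtable_free_t, the header occupies 24 bytes and
 * a batch holds (4096 - 24) / 8 = 509 entries; with 64K pages it would
 * hold (65536 - 24) / 8 = 8189.
 */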

#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

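	/* The empty IPI is used purely for synchronisation: waiting for it
	 * to complete on every other CPU ensures that none of them can
	 * still be in the middle of an interrupts-off walk of the page
	 * table we are about to free.
	 */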
	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}
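
/* RCU callback: by the time this runs a grace period has elapsed, so no
 * CPU can still be using the page tables queued in this batch.  Free
 * each queued page table and then the page holding the batch itself.
 */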
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}
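
/* Queue a page table page freed under an mmu_gather.  If no other CPU can
 * be using the mm (it has a single user, or it has only ever run on this
 * CPU) the page is freed immediately; otherwise it is added to the per-cpu
 * batch, which is submitted to RCU when it fills up or from
 * pte_free_finish().
 */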
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe as we are holding page_table_lock */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize = mmu_virtual_psize;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller) */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
#endif
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		flush_tlb_pending();
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	if (addr < KERNELBASE) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
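	/* The hash flush code works on full virtual addresses: the VSID
	 * identifies the 256MB segment and the low 28 bits of addr are the
	 * offset within that segment.
	 */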
	batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
	batch->pte[i] = __real_pte(__pte(pte), ptep);
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}
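
/* Flush every entry queued in the given batch: a single entry goes through
 * flush_hash_page(), larger batches through flush_hash_range().  The flush
 * is done "locally" (avoiding broadcast invalidations) when this CPU is
 * the only one the mm has ever run on.
 */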
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i;
	int cpu;
	cpumask_t tmp;
	int local = 0;

	BUG_ON(in_interrupt());

	cpu = get_cpu();
	i = batch->index;
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
	put_cpu();
}
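
/* Submit any partially filled per-cpu freelist batch to RCU so the page
 * tables queued in it eventually get freed.
 */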
void pte_free_finish(void)
{
	/* This is safe as we are holding page_table_lock */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}