/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
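/* Per-CPU batch of pending hash table invalidations: hpte_update()
 * queues entries here and __flush_tlb_pending() flushes them out.
 */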
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
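/* Page table pages whose PTEs were just torn down may still be walked
 * speculatively by other CPUs. Instead of freeing them immediately,
 * they are collected in a per-CPU batch (one page's worth of
 * pgtable_free_t cookies) and released via RCU once it is safe.
 */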
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};
DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
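/* Worked example (illustrative; assumes 4K pages, a 16-byte rcu_head
 * and an 8-byte pgtable_free_t): the batch header occupies 24 bytes,
 * so one batch page holds (4096 - 24) / 8 = 509 table cookies.
 */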
#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif
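/* smp_call_function() with wait=1 runs the (empty) function above on
 * every other CPU and returns only once they have all completed it.
 * Page tables are only walked with interrupts off, so taking the IPI
 * guarantees each CPU has left any such walk, and the page can then
 * be freed immediately.
 */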
/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}
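/* call_rcu() defers pte_free_rcu_callback() until every CPU has passed
 * through a quiescent state, which gives the same guarantee as the IPI
 * in pgtable_free_now() without forcing the caller to wait.
 */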
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE. If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller) */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(pte);

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		flush_tlb_pending();
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
	batch->pte[i] = __real_pte(__pte(pte), ptep);
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}
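/* Illustrative caller, a sketch modeled on pte_update() in the powerpc
 * pgtable headers (not part of this file; argument details may differ):
 * after atomically clearing bits in the Linux PTE, the old value is
 * checked for _PAGE_HASHPTE and, if set, queued here for hash eviction:
 *
 *	old = pte_update(ptep, ~0UL);
 *	if (old & _PAGE_HASHPTE)
 *		hpte_update(mm, addr, ptep, old, 0);
 */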
/* Flush out the accumulated hash table updates, using a TLB flush
 * local to this CPU when the mm has only ever run here, otherwise
 * broadcasting the invalidations.
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i;
	int cpu;
	cpumask_t tmp;
	int local = 0;

	BUG_ON(in_interrupt());

	cpu = get_cpu();
	i = batch->index;
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
	put_cpu();
}
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
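/* Both finishers are driven from the generic mmu_gather teardown: the
 * powerpc tlb_flush() (see include/asm-powerpc/tlb.h) drains any
 * pending hash batch with __flush_tlb_pending() and then calls
 * pte_free_finish() to submit the partially filled freelist batch.
 */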