/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/highmem.h>
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;
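
/*
 * For reference: the batch structure and PTE_FREELIST_SIZE come from
 * include/asm-ppc64/pgalloc.h.  A rough sketch of the layout assumed by
 * the code below (field names taken from their uses here):
 *
 *      struct pte_freelist_batch {
 *              struct rcu_head rcu;
 *              unsigned int    index;
 *              struct page     *pages[0];   // fills the rest of the page
 *      };
 *
 * Each batch occupies a single page obtained with __get_free_page(), so
 * PTE_FREELIST_SIZE is roughly (PAGE_SIZE - header) / sizeof(struct page *).
 */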
void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
{
        /* This is safe as we are holding page_table_lock */
        cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
                /* No other CPU can be using this mm's page tables:
                 * free the PTE page immediately. */
                pte_free(ptepage);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
                if (*batchp == NULL) {
                        /* Out of memory: fall back to a forced,
                         * synchronized free. */
                        pte_free_now(ptepage);
                        return;
                }
                (*batchp)->index = 0;
        }
        (*batchp)->pages[(*batchp)->index++] = ptepage;
        if ((*batchp)->index == PTE_FREELIST_SIZE) {
                pte_free_submit(*batchp);
                *batchp = NULL;
        }
}
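
/*
 * Three cases above: (1) the mm has at most one user, or has only ever
 * run on this CPU, so no other CPU can be walking its page tables and
 * the PTE page may be freed immediately; (2) otherwise the page is
 * queued on the per-cpu batch, to be freed only after an RCU grace
 * period, since another CPU may still be traversing the old tables;
 * (3) if no batch page can be allocated, pte_free_now() falls back to
 * an explicit IPI round-trip before freeing.
 */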
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
                 unsigned long pte, int wrprot)
{
        int i;
        unsigned long context = 0;
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

        if (REGION_ID(addr) == USER_REGION_ID)
                context = mm->context.id;
        i = batch->index;

        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte). If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         */
        if (unlikely(i != 0 && context != batch->context)) {
                flush_tlb_pending();
                i = 0;
        }
        if (i == 0) {
                batch->context = context;
                batch->mm = mm;
        }
        batch->pte[i] = __pte(pte);
        batch->addr[i] = addr;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                flush_tlb_pending();
}
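
/*
 * A sketch of how this is driven (the exact wrappers live in
 * include/asm-ppc64/pgtable.h, so the code here is illustrative):
 * generic mm code changes a Linux PTE through helpers such as
 * ptep_get_and_clear()/ptep_set_wrprotect(); when the old PTE had
 * _PAGE_HASHPTE set, those helpers call hpte_update() so the stale
 * hash table entry is queued for invalidation, e.g.:
 *
 *      old = pte_update(ptep, _PAGE_RW);       // clear the write bit
 *      if (old & _PAGE_HASHPTE)
 *              hpte_update(mm, addr, old, 1);  // wrprot == 1
 */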
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        int i;
        int cpu;
        cpumask_t tmp;
        int local = 0;

        BUG_ON(in_interrupt());

        cpu = get_cpu();
        i = batch->index;
        tmp = cpumask_of_cpu(cpu);
        /* If the mm has only ever run on this CPU, a local flush
         * is sufficient. */
        if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
                local = 1;

        if (i == 1)
                flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
                                local);
        else
                flush_hash_range(batch->context, i, local);
        batch->index = 0;
        put_cpu();
}
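
/*
 * The flush_tlb_pending() calls above resolve to a small wrapper in
 * include/asm-ppc64/tlbflush.h; roughly (a sketch, not the verbatim
 * header code):
 *
 *      static inline void flush_tlb_pending(void)
 *      {
 *              struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 *
 *              if (batch->index)
 *                      __flush_tlb_pending(batch);
 *              put_cpu_var(ppc64_tlb_batch);
 *      }
 */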
static void pte_free_smp_sync(void *arg)
{
        /* Do nothing, just ensure we sync with all CPUs */
}
/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
void pte_free_now(struct page *ptepage)
{
        pte_freelist_forced_free++;

        smp_call_function(pte_free_smp_sync, NULL, 0, 1);

        pte_free(ptepage);
}
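
/*
 * Note on the smp_call_function() call above: with the 2.6-era
 * signature smp_call_function(func, info, retry, wait), wait == 1
 * makes the caller spin until func has completed on every other CPU.
 * An empty handler therefore acts as a cheap "every CPU has passed a
 * barrier" primitive, achieving much the same guarantee as the RCU
 * path without needing to allocate any memory.
 */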
static void pte_free_rcu_callback(struct rcu_head *head)
{
        struct pte_freelist_batch *batch =
                container_of(head, struct pte_freelist_batch, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                pte_free(batch->pages[i]);
        free_page((unsigned long)batch);
}
void pte_free_submit(struct pte_freelist_batch *batch)
{
        INIT_RCU_HEAD(&batch->rcu);
        call_rcu(&batch->rcu, pte_free_rcu_callback);
}
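
/*
 * call_rcu() queues the batch and runs pte_free_rcu_callback() only
 * after a grace period, i.e. once every CPU has passed through a
 * quiescent state.  By then no CPU can still be traversing the stale
 * page tables, so the deferred pte_free() calls are safe without any
 * cross-CPU locking.
 */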
void pte_free_finish(void)
{
        /* This is safe as we are holding page_table_lock */
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (*batchp == NULL)
                return;
        pte_free_submit(*batchp);
        *batchp = NULL;
}