/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
static DEFINE_IDA(mmu_context_ida);
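/*
 * Both MMU modes allocate out of this one IDA: hash context ids and
 * radix PIDs therefore can never collide with each other.
 */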
static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}
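/*
 * ida_alloc_range() returns the smallest free id in [min_id, max_id],
 * or a negative errno. Callers use it both for "any free id"
 * (hash__alloc_context_id) and, by passing min_id == max_id, for
 * "exactly this id" (hash__reserve_context_id below).
 */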
void hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}
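/*
 * The top of the allocatable context id range depends on how much
 * virtual address space the hash MMU implements: with 68-bit VA the
 * full MAX_USER_CONTEXT range is usable, otherwise we must stay within
 * the smaller 65-bit VA limit.
 */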
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
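/*
 * Hash flavour of context initialisation: allocate a context id, then
 * set up the slice, subpage-protection and pkey state of the new mm.
 */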
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_init_new_context_exec(mm);

	subpage_prot_init_new_context(mm);

	pkey_mm_init(mm);
	return index;
}
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}
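/*
 * Common entry point, called for every newly created mm: dispatch to
 * the hash or radix allocator, then initialise the MMU-independent
 * parts of the context.
 */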
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}
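/*
 * Return a single context id to the allocator.
 */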
void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);
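/*
 * Free every context id attached to this mm. extended_id[] is a union
 * with context.id, so index 0 drops the primary id as well.
 */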
static void destroy_contexts(mm_context_t *ctx)
{
	int index, context_id;

	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
		context_id = ctx->extended_id[index];
		if (context_id)
			ida_free(&mmu_context_ida, context_id);
	}
}
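/*
 * The low bits of a fragment pointer encode how many fragments have
 * already been carved out of the backing page; dropping the references
 * held by the remaining (unused) fragments tells us whether we were
 * the last user of the page.
 */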
static void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pmd_frag);
	/* drop all the pending references */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* We allow PMD_FRAG_NR fragments from a PMD page */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}
static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}
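/*
 * Called when the mm is finally freed. On radix the process table
 * entry must already have been cleared by arch_exit_mmap(), which the
 * WARN_ON below verifies.
 */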
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	if (radix_enabled())
		WARN_ON(process_tb[mm->context.id].prtb0 != 0);
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}
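/*
 * Called from exit_mmap() while the address space is being torn down,
 * before the final "fullmm" TLB flush.
 */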
void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	/*
	 * The mtspr of the PID is not context synchronizing, so isync
	 * before executing anything in the new context.
	 */
	isync();
}
#endif