[freebsd-src.git] sys/powerpc/ps3/mmu_ps3.c
/*-
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <powerpc/aim/mmu_oea64.h>

#include "mmu_if.h"
#include "moea64_if.h"
#include "ps3-hvcall.h"

#define VSID_HASH_MASK          0x0000007fffffffffUL
#define PTESYNC()               __asm __volatile("ptesync")

extern int ps3fb_remap(void);
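
/* ID of the hypervisor virtual address space the kernel runs in. */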
static uint64_t mps3_vas_id;

/*
 * Kernel MMU interface
 */

static void     mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
                    vm_offset_t kernelend);
static void     mps3_cpu_bootstrap(mmu_t mmup, int ap);
static int64_t  mps3_pte_synch(mmu_t, struct pvo_entry *);
static int64_t  mps3_pte_clear(mmu_t, struct pvo_entry *, uint64_t ptebit);
static int64_t  mps3_pte_unset(mmu_t, struct pvo_entry *);
static int      mps3_pte_insert(mmu_t, struct pvo_entry *);

static mmu_method_t mps3_methods[] = {
        MMUMETHOD(mmu_bootstrap,        mps3_bootstrap),
        MMUMETHOD(mmu_cpu_bootstrap,    mps3_cpu_bootstrap),

        MMUMETHOD(moea64_pte_synch,     mps3_pte_synch),
        MMUMETHOD(moea64_pte_clear,     mps3_pte_clear),
        MMUMETHOD(moea64_pte_unset,     mps3_pte_unset),
        MMUMETHOD(moea64_pte_insert,    mps3_pte_insert),

        { 0, 0 }
};

MMU_DEF_INHERIT(ps3_mmu, "mmu_ps3", mps3_methods, 0, oea64_mmu);
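
/*
 * Serializes the lv1 hash-table hypercalls below; the hypervisor owns
 * the real page table, so every PTE read, write, and insert goes
 * through it under this lock.
 */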
static struct mtx mps3_table_lock;
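
/*
 * Bootstrap: ask the hypervisor for a virtual address space with 16 MB
 * and 64 KB page sizes, size moea64_pteg_count from the PTEG count it
 * actually granted, then run the generic moea64 bootstrap stages.
 */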
static void
mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
        uint64_t final_pteg_count;

        mtx_init(&mps3_table_lock, "page table", NULL, MTX_DEF);

        moea64_early_bootstrap(mmup, kernelstart, kernelend);

        lv1_construct_virtual_address_space(
            20 /* log_2(moea64_pteg_count) */, 2 /* n page sizes */,
            (24UL << 56) | (16UL << 48) /* page sizes 16 MB + 64 KB */,
            &mps3_vas_id, &final_pteg_count
        );

        moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);

        moea64_mid_bootstrap(mmup, kernelstart, kernelend);
        moea64_late_bootstrap(mmup, kernelstart, kernelend);
}
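
/*
 * Per-CPU setup: with translation disabled, switch this CPU onto the
 * address space created above and reload the kernel SLB entries cached
 * in the PCPU data.
 */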
static void
mps3_cpu_bootstrap(mmu_t mmup, int ap)
{
        struct slb *slb = PCPU_GET(slb);
        register_t seg0;
        int i;

        mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

        /*
         * Destroy the loader's address space if we are coming up for
         * the first time, and redo the FB mapping so we can continue
         * having a console.
         */

        if (!ap)
                lv1_destruct_virtual_address_space(0);

        lv1_select_virtual_address_space(mps3_vas_id);

        if (!ap)
                ps3fb_remap();

        /*
         * Install kernel SLB entries
         */

        __asm __volatile ("slbia");
        __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0));
        for (i = 0; i < 64; i++) {
                if (!(slb[i].slbe & SLBE_VALID))
                        continue;

                __asm __volatile ("slbmte %0, %1" ::
                    "r"(slb[i].slbv), "r"(slb[i].slbe));
        }
}
static int64_t
mps3_pte_synch_locked(struct pvo_entry *pvo)
{
        uint64_t halfbucket[4], rcbits;

        PTESYNC();
        lv1_read_htab_entries(mps3_vas_id, pvo->pvo_pte.slot & ~0x3UL,
            &halfbucket[0], &halfbucket[1], &halfbucket[2], &halfbucket[3],
            &rcbits);

        /* Check if present in page table */
        if ((halfbucket[pvo->pvo_pte.slot & 0x3] & LPTE_AVPN_MASK) !=
            ((pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
            LPTE_AVPN_MASK))
                return (-1);
        if (!(halfbucket[pvo->pvo_pte.slot & 0x3] & LPTE_VALID))
                return (-1);

        /*
         * rcbits contains the low 12 bits of each PTE's 2nd part,
         * spaced at 16-bit intervals
         */

        return ((rcbits >> ((3 - (pvo->pvo_pte.slot & 0x3))*16)) &
            (LPTE_CHG | LPTE_REF));
}
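
/* Locked wrapper: fetch the current REF/CHG bits for a PVO. */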
static int64_t
mps3_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
{
        int64_t retval;

        mtx_lock(&mps3_table_lock);
        retval = mps3_pte_synch_locked(pvo);
        mtx_unlock(&mps3_table_lock);

        return (retval);
}
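
/*
 * Clear the requested status bit (LPTE_REF or LPTE_CHG) in the PTE
 * while preserving the other accumulated bits, and return the REF/CHG
 * state observed beforehand (negative if the PTE has disappeared).
 */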
static int64_t
mps3_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
        int64_t refchg;
        struct lpte pte;

        mtx_lock(&mps3_table_lock);

        refchg = mps3_pte_synch_locked(pvo);
        if (refchg < 0) {
                mtx_unlock(&mps3_table_lock);
                return (refchg);
        }

        moea64_pte_from_pvo(pvo, &pte);

        pte.pte_lo |= refchg;
        pte.pte_lo &= ~ptebit;
        /* XXX: race on RC bits between write and sync. Anything to do? */
        lv1_write_htab_entry(mps3_vas_id, pvo->pvo_pte.slot, pte.pte_hi,
            pte.pte_lo);
        mtx_unlock(&mps3_table_lock);

        return (refchg);
}
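
/*
 * Remove the PTE backing this PVO by overwriting its slot with an empty
 * entry, returning the final REF/CHG bits.  A negative synch result
 * means the entry was no longer in the page table (it had been
 * evicted), so the overflow count is adjusted instead.
 */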
static int64_t
mps3_pte_unset(mmu_t mmu, struct pvo_entry *pvo)
{
        int64_t refchg;

        mtx_lock(&mps3_table_lock);
        refchg = mps3_pte_synch_locked(pvo);
        if (refchg < 0) {
                moea64_pte_overflow--;
                mtx_unlock(&mps3_table_lock);
                return (-1);
        }
        /* XXX: race on RC bits between unset and sync. Anything to do? */
        lv1_write_htab_entry(mps3_vas_id, pvo->pvo_pte.slot, 0, 0);
        mtx_unlock(&mps3_table_lock);
        moea64_pte_valid--;

        return (refchg & (LPTE_REF | LPTE_CHG));
}
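
/*
 * Insert a PTE for this PVO.  The hypercall wants the primary PTEG
 * slot and may place the entry in either PTEG, possibly evicting an
 * unwired entry; record where it actually landed and update the
 * valid/overflow counters accordingly.
 */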
static int
mps3_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
{
        int result;
        struct lpte pte, evicted;
        uint64_t index;

        if (pvo->pvo_vaddr & PVO_HID) {
                /* Hypercall needs primary PTEG */
                pvo->pvo_vaddr &= ~PVO_HID;
                pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
        }

        pvo->pvo_pte.slot &= ~7UL;
        moea64_pte_from_pvo(pvo, &pte);
        evicted.pte_hi = 0;
        PTESYNC();
        mtx_lock(&mps3_table_lock);
        result = lv1_insert_htab_entry(mps3_vas_id, pvo->pvo_pte.slot,
            pte.pte_hi, pte.pte_lo, LPTE_LOCKED | LPTE_WIRED, 0,
            &index, &evicted.pte_hi, &evicted.pte_lo);
        mtx_unlock(&mps3_table_lock);

        if (result != 0) {
                /* No freeable slots in either PTEG? We're hosed. */
                panic("mps3_pte_insert: overflow (%d)", result);
                return (-1);
        }

        /*
         * See where we ended up.
         */
        if ((index & ~7UL) != pvo->pvo_pte.slot)
                pvo->pvo_vaddr |= PVO_HID;
        pvo->pvo_pte.slot = index;

        moea64_pte_valid++;

        if (evicted.pte_hi) {
                KASSERT((evicted.pte_hi & (LPTE_WIRED | LPTE_LOCKED)) == 0,
                    ("Evicted a wired PTE"));
                moea64_pte_valid--;
                moea64_pte_overflow++;
        }

        return (0);
}