/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while(0)
#endif

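/* On an MSR reset, run the guest in 64-bit mode (MSR_SF) with translation off. */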
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
        kvmppc_set_msr(vcpu, MSR_SF);
}

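/*
 * Find the guest SLB entry that covers the effective address eaddr.  1T
 * segments (tb set) are compared against the 1T ESID, 256M segments against
 * the normal ESID.  Returns NULL if no valid entry matches, dumping the
 * guest SLB when DEBUG_MMU is enabled.
 */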
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
                                struct kvmppc_vcpu_book3s *vcpu_book3s,
                                gva_t eaddr)
{
        int i;
        u64 esid = GET_ESID(eaddr);
        u64 esid_1t = GET_ESID_1T(eaddr);

        for (i = 0; i < vcpu_book3s->slb_nr; i++) {
                u64 cmp_esid = esid;

                if (!vcpu_book3s->slb[i].valid)
                        continue;

                if (vcpu_book3s->slb[i].tb)
                        cmp_esid = esid_1t;

                if (vcpu_book3s->slb[i].esid == cmp_esid)
                        return &vcpu_book3s->slb[i];
        }

        dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
                eaddr, esid, esid_1t);
        for (i = 0; i < vcpu_book3s->slb_nr; i++) {
                if (vcpu_book3s->slb[i].vsid)
                        dprintk("  %d: %c%c%c %llx %llx\n", i,
                                vcpu_book3s->slb[i].valid ? 'v' : ' ',
                                vcpu_book3s->slb[i].large ? 'l' : ' ',
                                vcpu_book3s->slb[i].tb ? 't' : ' ',
                                vcpu_book3s->slb[i].esid,
                                vcpu_book3s->slb[i].vsid);
        }

        return NULL;
}

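/*
 * Convert an effective address into the guest virtual page number by
 * combining the page index with the VSID from the matching SLB entry.
 * The split between page index and VSID differs for 1T and 256M segments.
 */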
static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
                                         bool data)
{
        struct kvmppc_slb *slb;

        slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
        if (!slb)
                return 0;

        if (slb->tb)
                return (((u64)eaddr >> 12) & 0xfffffff) |
                       (((u64)slb->vsid) << 28);

        return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
}

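/*
 * Page size (log2: 16MB large pages or 4kB pages) and page index within the
 * 256MB segment for a given effective address.
 */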
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
        return slbe->large ? 24 : 12;
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
        return ((eaddr & 0xfffffff) >> p);
}

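/*
 * Compute the host virtual address of the guest PTEG that may hold the
 * mapping for eaddr.  The hash is VSID ^ page index, inverted for the
 * secondary PTEG, masked to the HTAB size encoded in SDR1 and combined
 * with the HTAB base, then translated gfn -> hva.
 */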
static hva_t kvmppc_mmu_book3s_64_get_pteg(
                                struct kvmppc_vcpu_book3s *vcpu_book3s,
                                struct kvmppc_slb *slbe, gva_t eaddr,
                                bool second)
{
        u64 hash, pteg, htabsize;
        u32 page;
        hva_t r;

        page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
        htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

        hash = slbe->vsid ^ page;
        if (second)
                hash = ~hash;
        hash &= ((1ULL << 39ULL) - 1ULL);
        hash &= htabsize;
        hash <<= 7ULL;

        pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg |= hash;

        dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
                page, vcpu_book3s->sdr1, pteg, slbe->vsid);

        r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
        if (kvm_is_error_hva(r))
                return r;
        return r | (pteg & ~PAGE_MASK);
}

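/*
 * Build the abbreviated virtual page number (AVPN) for eaddr so it can be
 * compared against the AVPN field of the HPTEs found in the PTEG.
 */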
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
        u64 avpn;

        avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
        avpn |= slbe->vsid << (28 - p);

        if (p < 24)
                avpn >>= ((80 - p) - 56) - 8;
        else
                avpn <<= 8;

        return avpn;
}

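/*
 * Translate a guest effective address into a kvmppc_pte by walking the
 * guest hashed page table: find the SLB entry, read the primary (and, if
 * needed, the secondary) PTEG from guest memory, match the AVPN, derive
 * the access permissions from the PP bits and storage key, and write the
 * referenced/changed bits back into the guest PTEG.
 */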
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                struct kvmppc_pte *gpte, bool data)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;
        hva_t ptegp;
        u64 pteg[16];
        u64 avpn = 0;
        int i;
        u8 key = 0;
        bool found = false;
        bool perm_err = false;
        int second = 0;

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
        if (!slbe)
                goto no_seg_found;

do_second:
        ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
        if (kvm_is_error_hva(ptegp))
                goto no_page_found;

        avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);

        if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
                printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
                goto no_page_found;
        }

        if ((vcpu->arch.msr & MSR_PR) && slbe->Kp)
                key = 4;
        else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks)
                key = 4;

        for (i = 0; i < 16; i += 2) {
                u64 v = pteg[i];
                u64 r = pteg[i+1];

                /* Valid check */
                if (!(v & HPTE_V_VALID))
                        continue;
                /* Hash check */
                if ((v & HPTE_V_SECONDARY) != second)
                        continue;

                /* AVPN compare */
                if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) {
                        u8 pp = (r & HPTE_R_PP) | key;
                        int eaddr_mask = 0xFFF;

                        gpte->eaddr = eaddr;
                        gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
                                                                    eaddr,
                                                                    data);
                        if (slbe->large)
                                eaddr_mask = 0xFFFFFF;
                        gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
                        gpte->may_execute = ((r & HPTE_R_N) ? false : true);
                        gpte->may_read = false;
                        gpte->may_write = false;

                        switch (pp) {
                        case 0:
                        case 1:
                        case 2:
                        case 6:
                                gpte->may_write = true;
                                /* fall through */
                        case 3:
                        case 5:
                        case 7:
                                gpte->may_read = true;
                                break;
                        }

                        if (!gpte->may_read) {
                                perm_err = true;
                                continue;
                        }

                        dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
                                "-> 0x%llx\n",
                                eaddr, avpn, gpte->vpage, gpte->raddr);
                        found = true;
                        break;
                }
        }

        /* Update PTE R and C bits, so the guest's swapper knows we used the
         * page */
        if (found) {
                /* Keep the full 64-bit doubleword so the write-back check
                 * below compares all bits, not just the low word. */
                u64 oldr = pteg[i+1];

                if (gpte->may_read) {
                        /* Set the accessed flag */
                        pteg[i+1] |= HPTE_R_R;
                }
                if (gpte->may_write) {
                        /* Set the dirty flag */
                        pteg[i+1] |= HPTE_R_C;
                } else {
                        dprintk("KVM: Mapping read-only page!\n");
                }

                /* Write back into the PTEG */
                if (pteg[i+1] != oldr)
                        copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));

                return 0;
        } else {
                dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
                        "ptegp=0x%lx)\n",
                        eaddr, to_book3s(vcpu)->sdr1, ptegp);
                for (i = 0; i < 16; i += 2)
                        dprintk("   %02d: 0x%llx - 0x%llx (0x%llx)\n",
                                i, pteg[i], pteg[i+1], avpn);

                if (!second) {
                        second = HPTE_V_SECONDARY;
                        goto do_second;
                }
        }

no_page_found:
        if (perm_err)
                return -EPERM;

        return -ENOENT;

no_seg_found:
        dprintk("KVM MMU: Trigger segment fault\n");
        return -EINVAL;
}

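/*
 * Emulate slbmte: decode the RB/RS register pair into the guest SLB entry
 * selected by the index in RB, then map the new segment on the host.
 */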
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        u64 esid, esid_1t;
        int slb_nr;
        struct kvmppc_slb *slbe;

        dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

        vcpu_book3s = to_book3s(vcpu);

        esid = GET_ESID(rb);
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;

        if (slb_nr > vcpu_book3s->slb_nr)
                return;

        slbe = &vcpu_book3s->slb[slb_nr];

        slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
        slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
        slbe->esid  = slbe->tb ? esid_1t : esid;
        slbe->vsid  = rs >> 12;
        slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
        slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
        slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
        slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
        slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

        slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
        slbe->origv = rs;

        /* Map the new segment */
        kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;

        if (slb_nr > vcpu_book3s->slb_nr)
                return 0;

        slbe = &vcpu_book3s->slb[slb_nr];

        return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;

        if (slb_nr > vcpu_book3s->slb_nr)
                return 0;

        slbe = &vcpu_book3s->slb[slb_nr];

        return slbe->origv;
}

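/*
 * Emulate slbie: mark the guest SLB entry covering ea invalid and update the
 * host segment mapping for that address.
 */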
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;

        dprintk("KVM MMU: slbie(0x%llx)\n", ea);

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);

        if (!slbe)
                return;

        dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

        slbe->valid = false;

        kvmppc_mmu_map_segment(vcpu, ea);
}

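/*
 * Emulate slbia: invalidate every guest SLB entry except entry 0 and, with
 * instruction relocation enabled, flush the host segments and re-map the
 * segment containing the current guest PC.
 */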
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        int i;

        dprintk("KVM MMU: slbia()\n");

        for (i = 1; i < vcpu_book3s->slb_nr; i++)
                vcpu_book3s->slb[i].valid = false;

        if (vcpu->arch.msr & MSR_IR) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
        }
}

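/*
 * Emulate mtsrin by constructing equivalent RB/RS values and feeding them to
 * the slbmte emulation, following the mapping quoted in the comment below.
 */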
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
                                        ulong value)
{
        u64 rb = 0, rs = 0;

        /*
         * According to Book3 2.01 mtsrin is implemented as:
         *
         * The SLB entry specified by (RB)32:35 is loaded from register
         * RS, as follows.
         *
         * SLBE Bit     Source                  SLB Field
         *
         * 0:31         0x0000_0000             ESID-0:31
         * 32:35        (RB)32:35               ESID-32:35
         * 36           0b1                     V
         * 37:61        0x00_0000 || 0b0        VSID-0:24
         * 62:88        (RS)37:63               VSID-25:51
         * 89:91        (RS)33:35               Ks Kp N
         * 92           (RS)36                  L ((RS)36 must be 0b0)
         * 93           0b0                     C
         */

        dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

        /* ESID = srnum */
        rb |= (srnum & 0xf) << 28;
        /* Set the valid bit */
        rb |= 1 << 27;
        /* Index = ESID */
        rb |= srnum;

        /* VSID = VSID */
        rs |= (value & 0xfffffff) << 12;
        /* flags = flags */
        rs |= ((value >> 28) & 0x7) << 9;

        kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
                                       bool large)
{
        u64 mask = 0xFFFFFFFFFULL;

        dprintk("KVM MMU: tlbie(0x%lx)\n", va);

        if (large)
                mask = 0xFFFFFF000ULL;
        kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}

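/*
 * Resolve an ESID to the VSID the shadow MMU should use.  With translation
 * off a special VSID_REAL* range is used; with both MSR_IR and MSR_DR set
 * the VSID comes from the matching guest SLB entry.
 */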
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
                                             u64 *vsid)
{
        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
        case 0:
                *vsid = (VSID_REAL >> 16) | esid;
                break;
        case MSR_IR:
                *vsid = (VSID_REAL_IR >> 16) | esid;
                break;
        case MSR_DR:
                *vsid = (VSID_REAL_DR >> 16) | esid;
                break;
        case MSR_DR|MSR_IR:
        {
                ulong ea;
                struct kvmppc_slb *slb;
                ea = esid << SID_SHIFT;
                slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
                if (slb)
                        *vsid = slb->vsid;
                else
                        return -ENOENT;

                break;
        }
        default:
                BUG();
                break;
        }

        return 0;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
        return (to_book3s(vcpu)->hid[5] & 0x80);
}

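/*
 * Wire up the Book3S-64 MMU emulation callbacks for this vcpu and flag that
 * the guest uses an SLB rather than segment registers.
 */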
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        mmu->mfsrin = NULL;
        mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
        mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
        mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
        mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
        mmu->slbie = kvmppc_mmu_book3s_64_slbie;
        mmu->slbia = kvmppc_mmu_book3s_64_slbia;
        mmu->xlate = kvmppc_mmu_book3s_64_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
        mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
        mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
        mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
        mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}