[qemu/ar7.git] / hw/ppc/spapr_softmmu.c

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"

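/* Sanity check a guest-supplied HPTE index against the current HPT size */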
static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}

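/*
 * H_ENTER: insert an HPTE into the hash page table. Without H_EXACT the
 * first free slot in the addressed PTEG is used; with H_EXACT the slot is
 * taken from the low three bits of the supplied PTE index.
 */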
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

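/*
 * Common helper for H_REMOVE and H_BULK_REMOVE: validate and invalidate a
 * single HPTE, returning its old contents through *vp and *rp.
 */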
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

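/* H_REMOVE: invalidate one HPTE and return its previous contents */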
static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST      0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE     0x8000000000000000ULL
#define H_BULK_REMOVE_END          0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS      0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND    0x1000000000000000ULL
#define H_BULK_REMOVE_PARM         0x2000000000000000ULL
#define H_BULK_REMOVE_HW           0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE     0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND      0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN         0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4

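/*
 * H_BULK_REMOVE: process up to four remove requests packed into the
 * argument buffer, writing a per-entry response code back into the high
 * bits of each request word.
 */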
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i * 2];
        target_ulong tsl = args[i * 2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}

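/* H_PROTECT: update the protection, key and no-execute bits of an HPTE */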
static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

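/* H_READ: return one HPTE, or a 4-entry aligned group of four with H_READ_4 */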
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}

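/* State for an HPT resize operation prepared in a background thread */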
struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    int ret;
    void *hpt;
};

static void free_pending_hpt(SpaprPendingHpt *pending)
{
    if (pending->hpt) {
        qemu_vfree(pending->hpt);
    }

    g_free(pending);
}

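/*
 * Allocate and zero the new hash table outside the BQL, then either mark
 * the pending operation complete or clean up if it was cancelled.
 */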
static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

    pending->hpt = qemu_try_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    qemu_mutex_lock_iothread();

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        /* Ready to go */
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    qemu_mutex_unlock_iothread();
    return NULL;
}

/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        /* Nothing to do */
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}

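/* Prepare phase of HPT resizing (H_RESIZE_HPT_PREPARE) for the softmmu HPT */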
target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    if (!shift) {
        /* nothing to do */
        return H_SUCCESS;
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}

static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;
    return ldq_p(addr);
}

static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HPTE64_DW1, pte1);
}

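/*
 * Recompute the hash for a single (bolted) HPTE and copy it into the new
 * hash table, reconstructing the VSID and page offset from the AVPN and
 * the old PTEG index.
 */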
static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);

    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_pte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));
    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs.  We might in future handle non-bolted
     * HPTEs, though so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
    return H_SUCCESS;
}

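/* Walk every PTEG of the old hash table and rehash its entries into the new one */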
static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        if (!hptes) {
            return H_HARDWARE;
        }

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}

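/*
 * Commit phase of HPT resizing (H_RESIZE_HPT_COMMIT): rehash bolted entries
 * into the prepared table and swap it in.
 */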
target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong flags,
                                       target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    /* Clean up */
    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
}

type_init(hypercall_register_types)