#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qemu/error-report.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"

static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}
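
/*
 * H_ENTER: insert a new entry into the guest hash page table.  args[]
 * carries the flags, the target PTE group index and the two PTE
 * doublewords; on success the slot actually used is written back to
 * args[0].
 */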
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;

        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}
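
/*
 * Result codes for the shared H_REMOVE/H_BULK_REMOVE backend below:
 * remove_hpte() invalidates a single HPTE if it passes the
 * H_AVPN/H_ANDCOND match checks and returns the old PTE doublewords
 * through *vp and *rp.
 */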
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn, target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
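
/*
 * H_REMOVE: invalidate one HPTE; on success the old PTE doublewords
 * are returned to the guest in args[0] and args[1].
 */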
static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE       0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE       0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC         0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS      0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX       0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH  4
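
/*
 * H_BULK_REMOVE: each translation specifier is a pair of doublewords
 * in args[].  The high word packs the request type, return code, RC
 * bits, flags and PTE index; the low word carries the AVPN/ANDCOND
 * value.  The return code and the reference/change bits of the
 * removed entry are written back into the high word.
 */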
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i * 2];
        target_ulong tsl = args[i * 2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}
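
/*
 * H_PROTECT: update the protection, key and no-execute bits of an
 * existing HPTE.  The entry is invalidated and the TLB flushed before
 * the new second doubleword is written back.
 */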
static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
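
/*
 * H_READ: return the current contents of one HPTE (or, with H_READ_4,
 * of a naturally aligned group of four) in args[].
 */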
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}
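
/*
 * HPT resizing (H_RESIZE_HPT_PREPARE / H_RESIZE_HPT_COMMIT): a
 * candidate hash page table is allocated and zeroed on a worker
 * thread, then the bolted entries are rehashed into it when the guest
 * commits to the new size.
 */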
struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    int ret;
    void *hpt;
};

static void free_pending_hpt(SpaprPendingHpt *pending)
{
    qemu_vfree(pending->hpt);
    g_free(pending);
}

static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

    pending->hpt = qemu_try_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    qemu_mutex_lock_iothread();

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    qemu_mutex_unlock_iothread();
    return NULL;
}

/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}
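
/*
 * H_RESIZE_HPT_PREPARE: start (or poll) the background allocation of
 * a candidate HPT of the requested size.  Returns a "long busy" hint
 * while the prepare thread is still running.
 */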
target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}

static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    return ldq_p(addr);
}

static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HPTE64_DW1, pte1);
}
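
/*
 * Move one bolted HPTE from the old hash table into the new one.  The
 * hash is recomputed from the AVPN and the old PTEG index, since the
 * hash mask changes with the table size; invalid and non-bolted
 * entries are skipped.
 */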
static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);

    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_pte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));

    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs.  We might in future handle non-bolted
     * HPTEs, though so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
    return H_SUCCESS;
}

static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}
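
/*
 * H_RESIZE_HPT_COMMIT: switch the guest over to the HPT built by the
 * prepare thread, after rehashing the bolted entries from the current
 * table into it.
 */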
target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong flags,
                                       target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
}

type_init(hypercall_register_types)