#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
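
/*
 * QEMU-side implementation of the sPAPR hash page table (HPT) hypercalls:
 * H_ENTER, H_REMOVE, H_PROTECT, H_READ and H_BULK_REMOVE, plus the
 * softmmu_resize_hpt_prepare()/softmmu_resize_hpt_commit() helpers used
 * for HPT resizing.
 */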
static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}
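
/*
 * H_ENTER: insert an HPTE, either into the first free slot of the PTEG
 * (the default) or into the exact slot selected by the low bits of ptex
 * when H_EXACT is set.
 */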
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    /* The low bits of ptex select the slot within the PTEG */
    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,   /* values match the H_BULK_REMOVE status encoding */
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;
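
/*
 * Common helper for H_REMOVE and H_BULK_REMOVE: invalidate one HPTE if it
 * matches the AVPN/ANDCOND conditions and return its old contents via
 * vp/rp.
 */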
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn, target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
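
/* H_REMOVE: remove a single HPTE, returning its old contents in args[0]/args[1] */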
static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}
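
/*
 * H_BULK_REMOVE operates on up to four (TSH, TSL) doubleword pairs; these
 * masks decode each request and encode the per-entry response code in the
 * high bits of the TSH word.
 */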
#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST      0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE     0x8000000000000000ULL
#define H_BULK_REMOVE_END          0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS      0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND    0x1000000000000000ULL
#define H_BULK_REMOVE_PARM         0x2000000000000000ULL
#define H_BULK_REMOVE_HW           0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE     0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND      0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN         0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4
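
/* H_BULK_REMOVE: remove up to H_BULK_REMOVE_MAX_BATCH HPTEs in one call */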
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}
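
/*
 * H_PROTECT: update the pp/key/N protection bits of an existing HPTE,
 * invalidating the entry around the update so no CPU sees a partial PTE.
 */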
static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
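
/* H_READ: return one HPTE, or four consecutive HPTEs when H_READ_4 is set */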
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}
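
/*
 * HPT resizing: the replacement hash table is allocated and zeroed by a
 * background "prepare" thread, then the bolted entries are rehashed into
 * it and it is swapped in when the guest commits.
 */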
struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    target_ulong ret;
    void *hpt;
};
static void free_pending_hpt(SpaprPendingHpt *pending)
{
    if (pending->hpt) {
        qemu_vfree(pending->hpt);
    }

    g_free(pending);
}
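
/* Background thread that allocates and zeroes the prospective HPT */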
static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

    pending->hpt = qemu_try_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    qemu_mutex_lock_iothread();

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        /* Ready to go */
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    qemu_mutex_unlock_iothread();
    return NULL;
}
/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        /* Nothing to do */
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}
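
/*
 * Start (or reuse) an HPT preparation for the requested shift; the guest
 * retries the hypercall until the background allocation completes.
 */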
target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}
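
/* Accessors for the prospective HPT, which at this point is plain host memory */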
static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    return ldq_p(addr);
}
static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1);
}
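
/*
 * Move one bolted HPTE from the current HPT into the prospective one,
 * recomputing its hash from the AVPN and the old PTEG index.
 */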
static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);

    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_pte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));
    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs.  We might in future handle non-bolted
     * HPTEs, though so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);

    return H_SUCCESS;
}
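
/* Walk every PTEG of the current HPT and rehash its entries into the new one */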
static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        if (!hptes) {
            return H_HARDWARE;
        }

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}
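
/*
 * Commit a prepared HPT: rehash the bolted entries into it and install it
 * as the guest's hash table, freeing the old one.
 */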
target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong flags,
                                       target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    /* Clean up our tracking of the prepared HPT */
    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
}

type_init(hypercall_register_types)