#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}
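/*
 * Illustrative example, assuming the sPAPR hpt_mask is
 * HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1: a 16 MiB HPT holds
 * 2^24 / 128 = 131072 PTEGs, so the mask is 0x1ffff.
 * (ptex & ~7) / HPTES_PER_GROUP is the PTEG index of the entry, and
 * any index bits above the mask mean the guest passed an
 * out-of-range PTE index.
 */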
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;

        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    /* Clear the software-maintained bits in the first doubleword */
    pteh &= ~0x60ULL;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}
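/*
 * Note on the two allocation modes above: without H_EXACT the low
 * three bits of ptex are ignored and the first invalid slot of the
 * 8-entry PTEG is picked; with H_EXACT the named slot itself must be
 * free.  Either way, the slot actually used is handed back to the
 * guest in args[0].
 */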
typedef enum {
    /* Values chosen to line up with the H_BULK_REMOVE completion codes */
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn, target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
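/*
 * Ordering note: the entry is zeroed (keeping HPTE64_V_HPTE_DIRTY so
 * HPT migration notices the change) before the TLB invalidation is
 * flagged.  ppc_hash64_tlb_flush_hpte() only marks the flush as
 * pending; the callers below do the real flush via check_tlb_flush(),
 * which lets H_BULK_REMOVE batch a single flush per hypercall.
 */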
static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4
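/*
 * Layout sketch of one two-doubleword "translation specifier", as
 * implied by the masks above: dword 0 carries the request/response
 * type in bits 62-63, a completion code in bits 60-61, the
 * AVPN/ANDCOND flags in bits 56-57 and the PTE index in the low 56
 * bits; dword 1 carries the AVPN value.  Assuming the usual H_AVPN
 * (bit 31) and H_ANDCOND (bit 30) definitions, the ">> 26" below
 * moves the bulk flag bits down to exactly those positions for
 * remove_hpte().
 */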
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i * 2];
        target_ulong tsl = args[i * 2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}
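/*
 * Response encoding: RemoveResult values 0-3 shifted into bits 60-61
 * by "ret << 60" line up with H_BULK_REMOVE_SUCCESS/NOT_FOUND/PARM/HW
 * above, and for successful removals the referenced/changed bits from
 * the old PTE's second doubleword are folded into the response word
 * as well.
 */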
static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
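/*
 * Update protocol: the entry is invalidated and the TLB flushed while
 * it is invalid, so no CPU can see a half-updated PTE; only then is
 * the new second doubleword stored along with the original valid bit.
 * The shifts place H_PROTECT's flag bits: bit 8 of flags lands on
 * HPTE64_R_PP0 (bit 63) via << 55, bits 12-13 land on HPTE64_R_KEY_HI
 * (bits 60-61) via << 48, and pp/n/key-lo are taken in place.
 */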
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}
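/*
 * Usage: without H_READ_4, args[0..1] return the single HPTE at ptex;
 * with H_READ_4 the read is aligned down to a group of four and
 * args[0..7] return four (pte0, pte1) pairs.
 */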
struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    int ret;
    void *hpt;
};

static void free_pending_hpt(SpaprPendingHpt *pending)
{
    if (pending->hpt) {
        qemu_vfree(pending->hpt);
    }

    g_free(pending);
}
static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

    pending->hpt = qemu_try_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    qemu_mutex_lock_iothread();

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        /* Ready to go */
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    qemu_mutex_unlock_iothread();
    return NULL;
}
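/*
 * Ownership handoff: the allocation runs without the BQL, then the
 * thread retakes it and checks whether the machine still points at
 * this preparation.  If so, completion is published under the BQL and
 * the main thread owns the state from here on; if not,
 * cancel_hpt_prepare() has already disowned it and the thread frees
 * its own state.
 */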
/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        /* Nothing to do */
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}
target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    if (!shift) {
        /* nothing to do */
        return H_SUCCESS;
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}
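/*
 * Retry behaviour: H_LONG_BUSY_ORDER_100_MSEC asks the guest to retry
 * the prepare call after roughly 100ms.  A retry with the same shift
 * returns the saved result once the thread has finished (or keeps the
 * guest polling); a different shift cancels the pending preparation
 * and starts a new one.
 */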
static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    return ldq_p(addr);
}
static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HPTE64_DW1, pte1);
}
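/*
 * Byte-order note (assuming the usual target-endian ldq_p/stq_p
 * semantics): the external HPT image is kept in target byte order, so
 * on a big-endian ppc64 target these helpers read and write
 * big-endian doublewords, with HPTE64_DW1 the byte offset of the
 * second doubleword inside the 16-byte HPTE.
 */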
static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);

    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_hpte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));

    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs. We might in future handle non-bolted
     * HPTEs, though so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
    return H_SUCCESS;
}
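/*
 * Worked example for the 256M-segment path: the primary hash is
 * vsid ^ (seg_off >> base_pg_shift), and the AVPN only preserves
 * seg_off bits 23 and up.  Since the old PTEG number is
 * hash & old_hash_mask, the dropped low bits are recovered by
 * inverting the hash equation:
 *   offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
 * which is exactly what the code above does before recomputing the
 * hash for the new table size.
 */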
static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        if (!hptes) {
            return H_HARDWARE;
        }

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}
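/*
 * Only bolted entries are carried across a resize: rehash_hpte()
 * silently drops valid-but-unbolted HPTEs, on the expectation that
 * the guest will simply refault and re-enter those translations
 * afterwards.
 */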
target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong flags,
                                       target_ulong shift)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    /* Clean up */
    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}
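/*
 * Two-phase protocol recap: PREPARE allocates and zeroes the
 * candidate table in the background; COMMIT, quoting the same shift,
 * rehashes the bolted entries into it, swaps it in as spapr->htab and
 * frees the loser.  The pending state is consumed on every outcome,
 * so a failed commit needs a fresh prepare.
 */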
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
}

type_init(hypercall_register_types)