/*
 * hw/ppc/spapr_hcall.c — sPAPR (PAPR paravirtualized) hypercall
 * implementations for the pseries machine: hash page table management
 * (H_ENTER/H_REMOVE/H_PROTECT/H_READ/H_BULK_REMOVE), VPA registration,
 * logical load/store, H_CEDE, H_SET_MODE and the hcall dispatch tables.
 */
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
7 struct SPRSyncState {
8 CPUState *cs;
9 int spr;
10 target_ulong value;
11 target_ulong mask;
14 static void do_spr_sync(void *arg)
16 struct SPRSyncState *s = arg;
17 PowerPCCPU *cpu = POWERPC_CPU(s->cs);
18 CPUPPCState *env = &cpu->env;
20 cpu_synchronize_state(s->cs);
21 env->spr[s->spr] &= ~s->mask;
22 env->spr[s->spr] |= s->value;
25 static void set_spr(CPUState *cs, int spr, target_ulong value,
26 target_ulong mask)
28 struct SPRSyncState s = {
29 .cs = cs,
30 .spr = spr,
31 .value = value,
32 .mask = mask
34 run_on_cpu(cs, do_spr_sync, &s);
37 static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
38 target_ulong pte_index)
40 target_ulong rb, va_low;
42 rb = (v & ~0x7fULL) << 16; /* AVA field */
43 va_low = pte_index >> 3;
44 if (v & HPTE64_V_SECONDARY) {
45 va_low = ~va_low;
47 /* xor vsid from AVA */
48 if (!(v & HPTE64_V_1TB_SEG)) {
49 va_low ^= v >> 12;
50 } else {
51 va_low ^= v >> 24;
53 va_low &= 0x7ff;
54 if (v & HPTE64_V_LARGE) {
55 rb |= 1; /* L field */
56 #if 0 /* Disable that P7 specific bit for now */
57 if (r & 0xff000) {
58 /* non-16MB large page, must be 64k */
59 /* (masks depend on page size) */
60 rb |= 0x1000; /* page encoding in LP field */
61 rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
62 rb |= (va_low & 0xfe); /* AVAL field */
64 #endif
65 } else {
66 /* 4kB page */
67 rb |= (va_low & 0x7ff) << 12; /* remaining 11b of AVA */
69 rb |= (v >> 54) & 0x300; /* B field */
70 return rb;
73 static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
76 * hash value/pteg group index is normalized by htab_mask
78 if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
79 return false;
81 return true;
84 static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
85 target_ulong opcode, target_ulong *args)
87 CPUPPCState *env = &cpu->env;
88 target_ulong flags = args[0];
89 target_ulong pte_index = args[1];
90 target_ulong pteh = args[2];
91 target_ulong ptel = args[3];
92 target_ulong page_shift = 12;
93 target_ulong raddr;
94 target_ulong index;
95 uint64_t token;
97 /* only handle 4k and 16M pages for now */
98 if (pteh & HPTE64_V_LARGE) {
99 #if 0 /* We don't support 64k pages yet */
100 if ((ptel & 0xf000) == 0x1000) {
101 /* 64k page */
102 } else
103 #endif
104 if ((ptel & 0xff000) == 0) {
105 /* 16M page */
106 page_shift = 24;
107 /* lowest AVA bit must be 0 for 16M pages */
108 if (pteh & 0x80) {
109 return H_PARAMETER;
111 } else {
112 return H_PARAMETER;
116 raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);
118 if (raddr < spapr->ram_limit) {
119 /* Regular RAM - should have WIMG=0010 */
120 if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
121 return H_PARAMETER;
123 } else {
124 /* Looks like an IO address */
125 /* FIXME: What WIMG combinations could be sensible for IO?
126 * For now we allow WIMG=010x, but are there others? */
127 /* FIXME: Should we check against registered IO addresses? */
128 if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
129 return H_PARAMETER;
133 pteh &= ~0x60ULL;
135 if (!valid_pte_index(env, pte_index)) {
136 return H_PARAMETER;
139 index = 0;
140 if (likely((flags & H_EXACT) == 0)) {
141 pte_index &= ~7ULL;
142 token = ppc_hash64_start_access(cpu, pte_index);
143 for (; index < 8; index++) {
144 if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
145 break;
148 ppc_hash64_stop_access(token);
149 if (index == 8) {
150 return H_PTEG_FULL;
152 } else {
153 token = ppc_hash64_start_access(cpu, pte_index);
154 if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
155 ppc_hash64_stop_access(token);
156 return H_PTEG_FULL;
158 ppc_hash64_stop_access(token);
161 ppc_hash64_store_hpte(env, pte_index + index,
162 pteh | HPTE64_V_HPTE_DIRTY, ptel);
164 args[0] = pte_index + index;
165 return H_SUCCESS;
/* Internal status codes shared by h_remove() and h_bulk_remove();
 * values match the PAPR H_BULK_REMOVE per-entry completion codes. */
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;
175 static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
176 target_ulong avpn,
177 target_ulong flags,
178 target_ulong *vp, target_ulong *rp)
180 uint64_t token;
181 target_ulong v, r, rb;
183 if (!valid_pte_index(env, ptex)) {
184 return REMOVE_PARM;
187 token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex);
188 v = ppc_hash64_load_hpte0(env, token, 0);
189 r = ppc_hash64_load_hpte1(env, token, 0);
190 ppc_hash64_stop_access(token);
192 if ((v & HPTE64_V_VALID) == 0 ||
193 ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
194 ((flags & H_ANDCOND) && (v & avpn) != 0)) {
195 return REMOVE_NOT_FOUND;
197 *vp = v;
198 *rp = r;
199 ppc_hash64_store_hpte(env, ptex, HPTE64_V_HPTE_DIRTY, 0);
200 rb = compute_tlbie_rb(v, r, ptex);
201 ppc_tlb_invalidate_one(env, rb);
202 return REMOVE_SUCCESS;
205 static target_ulong h_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
206 target_ulong opcode, target_ulong *args)
208 CPUPPCState *env = &cpu->env;
209 target_ulong flags = args[0];
210 target_ulong pte_index = args[1];
211 target_ulong avpn = args[2];
212 RemoveResult ret;
214 ret = remove_hpte(env, pte_index, avpn, flags,
215 &args[0], &args[1]);
217 switch (ret) {
218 case REMOVE_SUCCESS:
219 return H_SUCCESS;
221 case REMOVE_NOT_FOUND:
222 return H_NOT_FOUND;
224 case REMOVE_PARM:
225 return H_PARAMETER;
227 case REMOVE_HW:
228 return H_HARDWARE;
231 g_assert_not_reached();
/* Field layout of each H_BULK_REMOVE "translation specifier high" word
 * (PAPR): 2-bit type, 2-bit completion code, 2-bit flags, 56-bit PTEX. */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

/* Maximum number of (tsh, tsl) pairs in one H_BULK_REMOVE call */
#define H_BULK_REMOVE_MAX_BATCH        4
252 static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
253 target_ulong opcode, target_ulong *args)
255 CPUPPCState *env = &cpu->env;
256 int i;
258 for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
259 target_ulong *tsh = &args[i*2];
260 target_ulong tsl = args[i*2 + 1];
261 target_ulong v, r, ret;
263 if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
264 break;
265 } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
266 return H_PARAMETER;
269 *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
270 *tsh |= H_BULK_REMOVE_RESPONSE;
272 if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
273 *tsh |= H_BULK_REMOVE_PARM;
274 return H_PARAMETER;
277 ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
278 (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
279 &v, &r);
281 *tsh |= ret << 60;
283 switch (ret) {
284 case REMOVE_SUCCESS:
285 *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
286 break;
288 case REMOVE_PARM:
289 return H_PARAMETER;
291 case REMOVE_HW:
292 return H_HARDWARE;
296 return H_SUCCESS;
299 static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
300 target_ulong opcode, target_ulong *args)
302 CPUPPCState *env = &cpu->env;
303 target_ulong flags = args[0];
304 target_ulong pte_index = args[1];
305 target_ulong avpn = args[2];
306 uint64_t token;
307 target_ulong v, r, rb;
309 if (!valid_pte_index(env, pte_index)) {
310 return H_PARAMETER;
313 token = ppc_hash64_start_access(cpu, pte_index);
314 v = ppc_hash64_load_hpte0(env, token, 0);
315 r = ppc_hash64_load_hpte1(env, token, 0);
316 ppc_hash64_stop_access(token);
318 if ((v & HPTE64_V_VALID) == 0 ||
319 ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
320 return H_NOT_FOUND;
323 r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
324 HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
325 r |= (flags << 55) & HPTE64_R_PP0;
326 r |= (flags << 48) & HPTE64_R_KEY_HI;
327 r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
328 rb = compute_tlbie_rb(v, r, pte_index);
329 ppc_hash64_store_hpte(env, pte_index,
330 (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
331 ppc_tlb_invalidate_one(env, rb);
332 /* Don't need a memory barrier, due to qemu's global lock */
333 ppc_hash64_store_hpte(env, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
334 return H_SUCCESS;
337 static target_ulong h_read(PowerPCCPU *cpu, sPAPREnvironment *spapr,
338 target_ulong opcode, target_ulong *args)
340 CPUPPCState *env = &cpu->env;
341 target_ulong flags = args[0];
342 target_ulong pte_index = args[1];
343 uint8_t *hpte;
344 int i, ridx, n_entries = 1;
346 if (!valid_pte_index(env, pte_index)) {
347 return H_PARAMETER;
350 if (flags & H_READ_4) {
351 /* Clear the two low order bits */
352 pte_index &= ~(3ULL);
353 n_entries = 4;
356 hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
358 for (i = 0, ridx = 0; i < n_entries; i++) {
359 args[ridx++] = ldq_p(hpte);
360 args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
361 hpte += HASH_PTE_SIZE_64;
364 return H_SUCCESS;
367 static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
368 target_ulong opcode, target_ulong *args)
370 /* FIXME: actually implement this */
371 return H_HARDWARE;
/* H_REGISTER_VPA flag values (PAPR): which per-CPU area to (de)register */
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640   /* architected minimum VPA length */
#define VPA_SIZE_OFFSET        0x4   /* big-endian u16: VPA length */
#define VPA_SHARED_PROC_OFFSET 0x9   /* byte holding shared-processor flag */
#define VPA_SHARED_PROC_VAL    0x2
386 static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
388 CPUState *cs = CPU(ppc_env_get_cpu(env));
389 uint16_t size;
390 uint8_t tmp;
392 if (vpa == 0) {
393 hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
394 return H_HARDWARE;
397 if (vpa % env->dcache_line_size) {
398 return H_PARAMETER;
400 /* FIXME: bounds check the address */
402 size = lduw_be_phys(cs->as, vpa + 0x4);
404 if (size < VPA_MIN_SIZE) {
405 return H_PARAMETER;
408 /* VPA is not allowed to cross a page boundary */
409 if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
410 return H_PARAMETER;
413 env->vpa_addr = vpa;
415 tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
416 tmp |= VPA_SHARED_PROC_VAL;
417 stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);
419 return H_SUCCESS;
422 static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
424 if (env->slb_shadow_addr) {
425 return H_RESOURCE;
428 if (env->dtl_addr) {
429 return H_RESOURCE;
432 env->vpa_addr = 0;
433 return H_SUCCESS;
436 static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
438 CPUState *cs = CPU(ppc_env_get_cpu(env));
439 uint32_t size;
441 if (addr == 0) {
442 hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
443 return H_HARDWARE;
446 size = ldl_be_phys(cs->as, addr + 0x4);
447 if (size < 0x8) {
448 return H_PARAMETER;
451 if ((addr / 4096) != ((addr + size - 1) / 4096)) {
452 return H_PARAMETER;
455 if (!env->vpa_addr) {
456 return H_RESOURCE;
459 env->slb_shadow_addr = addr;
460 env->slb_shadow_size = size;
462 return H_SUCCESS;
465 static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
467 env->slb_shadow_addr = 0;
468 env->slb_shadow_size = 0;
469 return H_SUCCESS;
472 static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
474 CPUState *cs = CPU(ppc_env_get_cpu(env));
475 uint32_t size;
477 if (addr == 0) {
478 hcall_dprintf("Can't cope with DTL at logical 0\n");
479 return H_HARDWARE;
482 size = ldl_be_phys(cs->as, addr + 0x4);
484 if (size < 48) {
485 return H_PARAMETER;
488 if (!env->vpa_addr) {
489 return H_RESOURCE;
492 env->dtl_addr = addr;
493 env->dtl_size = size;
495 return H_SUCCESS;
498 static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
500 env->dtl_addr = 0;
501 env->dtl_size = 0;
503 return H_SUCCESS;
506 static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
507 target_ulong opcode, target_ulong *args)
509 target_ulong flags = args[0];
510 target_ulong procno = args[1];
511 target_ulong vpa = args[2];
512 target_ulong ret = H_PARAMETER;
513 CPUPPCState *tenv;
514 PowerPCCPU *tcpu;
516 tcpu = ppc_get_vcpu_by_dt_id(procno);
517 if (!tcpu) {
518 return H_PARAMETER;
520 tenv = &tcpu->env;
522 switch (flags) {
523 case FLAGS_REGISTER_VPA:
524 ret = register_vpa(tenv, vpa);
525 break;
527 case FLAGS_DEREGISTER_VPA:
528 ret = deregister_vpa(tenv, vpa);
529 break;
531 case FLAGS_REGISTER_SLBSHADOW:
532 ret = register_slb_shadow(tenv, vpa);
533 break;
535 case FLAGS_DEREGISTER_SLBSHADOW:
536 ret = deregister_slb_shadow(tenv, vpa);
537 break;
539 case FLAGS_REGISTER_DTL:
540 ret = register_dtl(tenv, vpa);
541 break;
543 case FLAGS_DEREGISTER_DTL:
544 ret = deregister_dtl(tenv, vpa);
545 break;
548 return ret;
551 static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
552 target_ulong opcode, target_ulong *args)
554 CPUPPCState *env = &cpu->env;
555 CPUState *cs = CPU(cpu);
557 env->msr |= (1ULL << MSR_EE);
558 hreg_compute_hflags(env);
559 if (!cpu_has_work(cs)) {
560 cs->halted = 1;
561 cs->exception_index = EXCP_HLT;
562 cs->exit_request = 1;
564 return H_SUCCESS;
567 static target_ulong h_rtas(PowerPCCPU *cpu, sPAPREnvironment *spapr,
568 target_ulong opcode, target_ulong *args)
570 target_ulong rtas_r3 = args[0];
571 uint32_t token = rtas_ld(rtas_r3, 0);
572 uint32_t nargs = rtas_ld(rtas_r3, 1);
573 uint32_t nret = rtas_ld(rtas_r3, 2);
575 return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
576 nret, rtas_r3 + 12 + 4*nargs);
579 static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
580 target_ulong opcode, target_ulong *args)
582 CPUState *cs = CPU(cpu);
583 target_ulong size = args[0];
584 target_ulong addr = args[1];
586 switch (size) {
587 case 1:
588 args[0] = ldub_phys(cs->as, addr);
589 return H_SUCCESS;
590 case 2:
591 args[0] = lduw_phys(cs->as, addr);
592 return H_SUCCESS;
593 case 4:
594 args[0] = ldl_phys(cs->as, addr);
595 return H_SUCCESS;
596 case 8:
597 args[0] = ldq_phys(cs->as, addr);
598 return H_SUCCESS;
600 return H_PARAMETER;
603 static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
604 target_ulong opcode, target_ulong *args)
606 CPUState *cs = CPU(cpu);
608 target_ulong size = args[0];
609 target_ulong addr = args[1];
610 target_ulong val = args[2];
612 switch (size) {
613 case 1:
614 stb_phys(cs->as, addr, val);
615 return H_SUCCESS;
616 case 2:
617 stw_phys(cs->as, addr, val);
618 return H_SUCCESS;
619 case 4:
620 stl_phys(cs->as, addr, val);
621 return H_SUCCESS;
622 case 8:
623 stq_phys(cs->as, addr, val);
624 return H_SUCCESS;
626 return H_PARAMETER;
629 static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
630 target_ulong opcode, target_ulong *args)
632 CPUState *cs = CPU(cpu);
634 target_ulong dst = args[0]; /* Destination address */
635 target_ulong src = args[1]; /* Source address */
636 target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
637 target_ulong count = args[3]; /* Element count */
638 target_ulong op = args[4]; /* 0 = copy, 1 = invert */
639 uint64_t tmp;
640 unsigned int mask = (1 << esize) - 1;
641 int step = 1 << esize;
643 if (count > 0x80000000) {
644 return H_PARAMETER;
647 if ((dst & mask) || (src & mask) || (op > 1)) {
648 return H_PARAMETER;
651 if (dst >= src && dst < (src + (count << esize))) {
652 dst = dst + ((count - 1) << esize);
653 src = src + ((count - 1) << esize);
654 step = -step;
657 while (count--) {
658 switch (esize) {
659 case 0:
660 tmp = ldub_phys(cs->as, src);
661 break;
662 case 1:
663 tmp = lduw_phys(cs->as, src);
664 break;
665 case 2:
666 tmp = ldl_phys(cs->as, src);
667 break;
668 case 3:
669 tmp = ldq_phys(cs->as, src);
670 break;
671 default:
672 return H_PARAMETER;
674 if (op == 1) {
675 tmp = ~tmp;
677 switch (esize) {
678 case 0:
679 stb_phys(cs->as, dst, tmp);
680 break;
681 case 1:
682 stw_phys(cs->as, dst, tmp);
683 break;
684 case 2:
685 stl_phys(cs->as, dst, tmp);
686 break;
687 case 3:
688 stq_phys(cs->as, dst, tmp);
689 break;
691 dst = dst + step;
692 src = src + step;
695 return H_SUCCESS;
698 static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
699 target_ulong opcode, target_ulong *args)
701 /* Nothing to do on emulation, KVM will trap this in the kernel */
702 return H_SUCCESS;
705 static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
706 target_ulong opcode, target_ulong *args)
708 /* Nothing to do on emulation, KVM will trap this in the kernel */
709 return H_SUCCESS;
712 static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
713 target_ulong opcode, target_ulong *args)
715 CPUState *cs;
716 target_ulong mflags = args[0];
717 target_ulong resource = args[1];
718 target_ulong value1 = args[2];
719 target_ulong value2 = args[3];
720 target_ulong ret = H_P2;
722 if (resource == H_SET_MODE_RESOURCE_LE) {
723 if (value1) {
724 ret = H_P3;
725 goto out;
727 if (value2) {
728 ret = H_P4;
729 goto out;
731 switch (mflags) {
732 case H_SET_MODE_ENDIAN_BIG:
733 CPU_FOREACH(cs) {
734 set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
736 ret = H_SUCCESS;
737 break;
739 case H_SET_MODE_ENDIAN_LITTLE:
740 CPU_FOREACH(cs) {
741 set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
743 ret = H_SUCCESS;
744 break;
746 default:
747 ret = H_UNSUPPORTED_FLAG;
751 out:
752 return ret;
755 static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
756 static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
758 void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
760 spapr_hcall_fn *slot;
762 if (opcode <= MAX_HCALL_OPCODE) {
763 assert((opcode & 0x3) == 0);
765 slot = &papr_hypercall_table[opcode / 4];
766 } else {
767 assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));
769 slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
772 assert(!(*slot));
773 *slot = fn;
776 target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
777 target_ulong *args)
779 if ((opcode <= MAX_HCALL_OPCODE)
780 && ((opcode & 0x3) == 0)) {
781 spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];
783 if (fn) {
784 return fn(cpu, spapr, opcode, args);
786 } else if ((opcode >= KVMPPC_HCALL_BASE) &&
787 (opcode <= KVMPPC_HCALL_MAX)) {
788 spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
790 if (fn) {
791 return fn(cpu, spapr, opcode, args);
795 hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
796 return H_FUNCTION;
799 static void hypercall_register_types(void)
801 /* hcall-pft */
802 spapr_register_hypercall(H_ENTER, h_enter);
803 spapr_register_hypercall(H_REMOVE, h_remove);
804 spapr_register_hypercall(H_PROTECT, h_protect);
805 spapr_register_hypercall(H_READ, h_read);
807 /* hcall-bulk */
808 spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
810 /* hcall-dabr */
811 spapr_register_hypercall(H_SET_DABR, h_set_dabr);
813 /* hcall-splpar */
814 spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
815 spapr_register_hypercall(H_CEDE, h_cede);
817 /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
818 * here between the "CI" and the "CACHE" variants, they will use whatever
819 * mapping attributes qemu is using. When using KVM, the kernel will
820 * enforce the attributes more strongly
822 spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
823 spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
824 spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
825 spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
826 spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
827 spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
828 spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);
830 /* qemu/KVM-PPC specific hcalls */
831 spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
833 spapr_register_hypercall(H_SET_MODE, h_set_mode);
836 type_init(hypercall_register_types)