2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 * Copyright Novell Inc 2010
17 * Authors: Alexander Graf <agraf@suse.de>
21 #include <asm/kvm_ppc.h>
22 #include <asm/disassemble.h>
23 #include <asm/kvm_book3s.h>
24 #include <asm/kvm_fpu.h>
26 #include <asm/cacheflush.h>
27 #include <asm/switch_to.h>
28 #include <linux/vmalloc.h>
33 #define dprintk printk
35 #define dprintk(...) do { } while(0);
51 #define OP_31_LFSX 535
52 #define OP_31_LFSUX 567
53 #define OP_31_LFDX 599
54 #define OP_31_LFDUX 631
55 #define OP_31_STFSX 663
56 #define OP_31_STFSUX 695
57 #define OP_31_STFX 727
58 #define OP_31_STFUX 759
59 #define OP_31_LWIZX 887
60 #define OP_31_STFIWX 983
62 #define OP_59_FADDS 21
63 #define OP_59_FSUBS 20
64 #define OP_59_FSQRTS 22
65 #define OP_59_FDIVS 18
67 #define OP_59_FMULS 25
68 #define OP_59_FRSQRTES 26
69 #define OP_59_FMSUBS 28
70 #define OP_59_FMADDS 29
71 #define OP_59_FNMSUBS 30
72 #define OP_59_FNMADDS 31
75 #define OP_63_FCPSGN 8
77 #define OP_63_FCTIW 14
78 #define OP_63_FCTIWZ 15
81 #define OP_63_FSQRT 22
85 #define OP_63_FRSQRTE 26
86 #define OP_63_FMSUB 28
87 #define OP_63_FMADD 29
88 #define OP_63_FNMSUB 30
89 #define OP_63_FNMADD 31
90 #define OP_63_FCMPO 32
91 #define OP_63_MTFSB1 38 // XXX
94 #define OP_63_MCRFS 64
95 #define OP_63_MTFSB0 70
97 #define OP_63_MTFSFI 134
98 #define OP_63_FABS 264
99 #define OP_63_MFFS 583
100 #define OP_63_MTFSF 711
102 #define OP_4X_PS_CMPU0 0
103 #define OP_4X_PSQ_LX 6
104 #define OP_4XW_PSQ_STX 7
105 #define OP_4A_PS_SUM0 10
106 #define OP_4A_PS_SUM1 11
107 #define OP_4A_PS_MULS0 12
108 #define OP_4A_PS_MULS1 13
109 #define OP_4A_PS_MADDS0 14
110 #define OP_4A_PS_MADDS1 15
111 #define OP_4A_PS_DIV 18
112 #define OP_4A_PS_SUB 20
113 #define OP_4A_PS_ADD 21
114 #define OP_4A_PS_SEL 23
115 #define OP_4A_PS_RES 24
116 #define OP_4A_PS_MUL 25
117 #define OP_4A_PS_RSQRTE 26
118 #define OP_4A_PS_MSUB 28
119 #define OP_4A_PS_MADD 29
120 #define OP_4A_PS_NMSUB 30
121 #define OP_4A_PS_NMADD 31
122 #define OP_4X_PS_CMPO0 32
123 #define OP_4X_PSQ_LUX 38
124 #define OP_4XW_PSQ_STUX 39
125 #define OP_4X_PS_NEG 40
126 #define OP_4X_PS_CMPU1 64
127 #define OP_4X_PS_MR 72
128 #define OP_4X_PS_CMPO1 96
129 #define OP_4X_PS_NABS 136
130 #define OP_4X_PS_ABS 264
131 #define OP_4X_PS_MERGE00 528
132 #define OP_4X_PS_MERGE01 560
133 #define OP_4X_PS_MERGE10 592
134 #define OP_4X_PS_MERGE11 624
136 #define SCALAR_NONE 0
137 #define SCALAR_HIGH (1 << 0)
138 #define SCALAR_LOW (1 << 1)
139 #define SCALAR_NO_PS0 (1 << 2)
140 #define SCALAR_NO_PS1 (1 << 3)
142 #define GQR_ST_TYPE_MASK 0x00000007
143 #define GQR_ST_TYPE_SHIFT 0
144 #define GQR_ST_SCALE_MASK 0x00003f00
145 #define GQR_ST_SCALE_SHIFT 8
146 #define GQR_LD_TYPE_MASK 0x00070000
147 #define GQR_LD_TYPE_SHIFT 16
148 #define GQR_LD_SCALE_MASK 0x3f000000
149 #define GQR_LD_SCALE_SHIFT 24
151 #define GQR_QUANTIZE_FLOAT 0
152 #define GQR_QUANTIZE_U8 4
153 #define GQR_QUANTIZE_U16 5
154 #define GQR_QUANTIZE_S8 6
155 #define GQR_QUANTIZE_S16 7
157 #define FPU_LS_SINGLE 0
158 #define FPU_LS_DOUBLE 1
159 #define FPU_LS_SINGLE_LOW 2
161 static inline void kvmppc_sync_qpr(struct kvm_vcpu
*vcpu
, int rt
)
163 kvm_cvt_df(&vcpu
->arch
.fpr
[rt
], &vcpu
->arch
.qpr
[rt
]);
166 static void kvmppc_inject_pf(struct kvm_vcpu
*vcpu
, ulong eaddr
, bool is_store
)
169 struct kvm_vcpu_arch_shared
*shared
= vcpu
->arch
.shared
;
171 shared
->msr
= kvmppc_set_field(shared
->msr
, 33, 36, 0);
172 shared
->msr
= kvmppc_set_field(shared
->msr
, 42, 47, 0);
175 dsisr
= kvmppc_set_field(0, 33, 33, 1);
177 shared
->dsisr
= kvmppc_set_field(dsisr
, 38, 38, 1);
178 kvmppc_book3s_queue_irqprio(vcpu
, BOOK3S_INTERRUPT_DATA_STORAGE
);
181 static int kvmppc_emulate_fpr_load(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
182 int rs
, ulong addr
, int ls_type
)
184 int emulated
= EMULATE_FAIL
;
187 int len
= sizeof(u32
);
189 if (ls_type
== FPU_LS_DOUBLE
)
192 /* read from memory */
193 r
= kvmppc_ld(vcpu
, &addr
, len
, tmp
, true);
194 vcpu
->arch
.paddr_accessed
= addr
;
197 kvmppc_inject_pf(vcpu
, addr
, false);
199 } else if (r
== EMULATE_DO_MMIO
) {
200 emulated
= kvmppc_handle_load(run
, vcpu
, KVM_MMIO_REG_FPR
| rs
,
205 emulated
= EMULATE_DONE
;
207 /* put in registers */
210 kvm_cvt_fd((u32
*)tmp
, &vcpu
->arch
.fpr
[rs
]);
211 vcpu
->arch
.qpr
[rs
] = *((u32
*)tmp
);
214 vcpu
->arch
.fpr
[rs
] = *((u64
*)tmp
);
218 dprintk(KERN_INFO
"KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64
*)tmp
,
225 static int kvmppc_emulate_fpr_store(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
226 int rs
, ulong addr
, int ls_type
)
228 int emulated
= EMULATE_FAIL
;
236 kvm_cvt_df(&vcpu
->arch
.fpr
[rs
], (u32
*)tmp
);
240 case FPU_LS_SINGLE_LOW
:
241 *((u32
*)tmp
) = vcpu
->arch
.fpr
[rs
];
242 val
= vcpu
->arch
.fpr
[rs
] & 0xffffffff;
246 *((u64
*)tmp
) = vcpu
->arch
.fpr
[rs
];
247 val
= vcpu
->arch
.fpr
[rs
];
255 r
= kvmppc_st(vcpu
, &addr
, len
, tmp
, true);
256 vcpu
->arch
.paddr_accessed
= addr
;
258 kvmppc_inject_pf(vcpu
, addr
, true);
259 } else if (r
== EMULATE_DO_MMIO
) {
260 emulated
= kvmppc_handle_store(run
, vcpu
, val
, len
, 1);
262 emulated
= EMULATE_DONE
;
265 dprintk(KERN_INFO
"KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
271 static int kvmppc_emulate_psq_load(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
272 int rs
, ulong addr
, bool w
, int i
)
274 int emulated
= EMULATE_FAIL
;
279 /* read from memory */
281 r
= kvmppc_ld(vcpu
, &addr
, sizeof(u32
), tmp
, true);
282 memcpy(&tmp
[1], &one
, sizeof(u32
));
284 r
= kvmppc_ld(vcpu
, &addr
, sizeof(u32
) * 2, tmp
, true);
286 vcpu
->arch
.paddr_accessed
= addr
;
288 kvmppc_inject_pf(vcpu
, addr
, false);
290 } else if ((r
== EMULATE_DO_MMIO
) && w
) {
291 emulated
= kvmppc_handle_load(run
, vcpu
, KVM_MMIO_REG_FPR
| rs
,
293 vcpu
->arch
.qpr
[rs
] = tmp
[1];
295 } else if (r
== EMULATE_DO_MMIO
) {
296 emulated
= kvmppc_handle_load(run
, vcpu
, KVM_MMIO_REG_FQPR
| rs
,
301 emulated
= EMULATE_DONE
;
303 /* put in registers */
304 kvm_cvt_fd(&tmp
[0], &vcpu
->arch
.fpr
[rs
]);
305 vcpu
->arch
.qpr
[rs
] = tmp
[1];
307 dprintk(KERN_INFO
"KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp
[0],
308 tmp
[1], addr
, w
? 4 : 8);
314 static int kvmppc_emulate_psq_store(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
315 int rs
, ulong addr
, bool w
, int i
)
317 int emulated
= EMULATE_FAIL
;
320 int len
= w
? sizeof(u32
) : sizeof(u64
);
322 kvm_cvt_df(&vcpu
->arch
.fpr
[rs
], &tmp
[0]);
323 tmp
[1] = vcpu
->arch
.qpr
[rs
];
325 r
= kvmppc_st(vcpu
, &addr
, len
, tmp
, true);
326 vcpu
->arch
.paddr_accessed
= addr
;
328 kvmppc_inject_pf(vcpu
, addr
, true);
329 } else if ((r
== EMULATE_DO_MMIO
) && w
) {
330 emulated
= kvmppc_handle_store(run
, vcpu
, tmp
[0], 4, 1);
331 } else if (r
== EMULATE_DO_MMIO
) {
332 u64 val
= ((u64
)tmp
[0] << 32) | tmp
[1];
333 emulated
= kvmppc_handle_store(run
, vcpu
, val
, 8, 1);
335 emulated
= EMULATE_DONE
;
338 dprintk(KERN_INFO
"KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
339 tmp
[0], tmp
[1], addr
, len
);
345 * Cuts out inst bits with ordering according to spec.
346 * That means the leftmost bit is zero. All given bits are included.
348 static inline u32
inst_get_field(u32 inst
, int msb
, int lsb
)
350 return kvmppc_get_field(inst
, msb
+ 32, lsb
+ 32);
354 * Replaces inst bits with ordering according to spec.
356 static inline u32
inst_set_field(u32 inst
, int msb
, int lsb
, int value
)
358 return kvmppc_set_field(inst
, msb
+ 32, lsb
+ 32, value
);
361 bool kvmppc_inst_is_paired_single(struct kvm_vcpu
*vcpu
, u32 inst
)
363 if (!(vcpu
->arch
.hflags
& BOOK3S_HFLAG_PAIRED_SINGLE
))
366 switch (get_op(inst
)) {
382 switch (inst_get_field(inst
, 21, 30)) {
393 case OP_4X_PS_MERGE00
:
394 case OP_4X_PS_MERGE01
:
395 case OP_4X_PS_MERGE10
:
396 case OP_4X_PS_MERGE11
:
400 switch (inst_get_field(inst
, 25, 30)) {
402 case OP_4XW_PSQ_STUX
:
406 switch (inst_get_field(inst
, 26, 30)) {
411 case OP_4A_PS_MADDS0
:
412 case OP_4A_PS_MADDS1
:
419 case OP_4A_PS_RSQRTE
:
428 switch (inst_get_field(inst
, 21, 30)) {
436 switch (inst_get_field(inst
, 26, 30)) {
446 switch (inst_get_field(inst
, 21, 30)) {
468 switch (inst_get_field(inst
, 26, 30)) {
479 switch (inst_get_field(inst
, 21, 30)) {
497 static int get_d_signext(u32 inst
)
499 int d
= inst
& 0x8ff;
507 static int kvmppc_ps_three_in(struct kvm_vcpu
*vcpu
, bool rc
,
508 int reg_out
, int reg_in1
, int reg_in2
,
509 int reg_in3
, int scalar
,
510 void (*func
)(u64
*fpscr
,
512 u32
*src2
, u32
*src3
))
514 u32
*qpr
= vcpu
->arch
.qpr
;
515 u64
*fpr
= vcpu
->arch
.fpr
;
517 u32 ps0_in1
, ps0_in2
, ps0_in3
;
518 u32 ps1_in1
, ps1_in2
, ps1_in3
;
524 kvm_cvt_df(&fpr
[reg_in1
], &ps0_in1
);
525 kvm_cvt_df(&fpr
[reg_in2
], &ps0_in2
);
526 kvm_cvt_df(&fpr
[reg_in3
], &ps0_in3
);
528 if (scalar
& SCALAR_LOW
)
529 ps0_in2
= qpr
[reg_in2
];
531 func(&vcpu
->arch
.fpscr
, &ps0_out
, &ps0_in1
, &ps0_in2
, &ps0_in3
);
533 dprintk(KERN_INFO
"PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
534 ps0_in1
, ps0_in2
, ps0_in3
, ps0_out
);
536 if (!(scalar
& SCALAR_NO_PS0
))
537 kvm_cvt_fd(&ps0_out
, &fpr
[reg_out
]);
540 ps1_in1
= qpr
[reg_in1
];
541 ps1_in2
= qpr
[reg_in2
];
542 ps1_in3
= qpr
[reg_in3
];
544 if (scalar
& SCALAR_HIGH
)
547 if (!(scalar
& SCALAR_NO_PS1
))
548 func(&vcpu
->arch
.fpscr
, &qpr
[reg_out
], &ps1_in1
, &ps1_in2
, &ps1_in3
);
550 dprintk(KERN_INFO
"PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
551 ps1_in1
, ps1_in2
, ps1_in3
, qpr
[reg_out
]);
556 static int kvmppc_ps_two_in(struct kvm_vcpu
*vcpu
, bool rc
,
557 int reg_out
, int reg_in1
, int reg_in2
,
559 void (*func
)(u64
*fpscr
,
563 u32
*qpr
= vcpu
->arch
.qpr
;
564 u64
*fpr
= vcpu
->arch
.fpr
;
566 u32 ps0_in1
, ps0_in2
;
568 u32 ps1_in1
, ps1_in2
;
574 kvm_cvt_df(&fpr
[reg_in1
], &ps0_in1
);
576 if (scalar
& SCALAR_LOW
)
577 ps0_in2
= qpr
[reg_in2
];
579 kvm_cvt_df(&fpr
[reg_in2
], &ps0_in2
);
581 func(&vcpu
->arch
.fpscr
, &ps0_out
, &ps0_in1
, &ps0_in2
);
583 if (!(scalar
& SCALAR_NO_PS0
)) {
584 dprintk(KERN_INFO
"PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
585 ps0_in1
, ps0_in2
, ps0_out
);
587 kvm_cvt_fd(&ps0_out
, &fpr
[reg_out
]);
591 ps1_in1
= qpr
[reg_in1
];
592 ps1_in2
= qpr
[reg_in2
];
594 if (scalar
& SCALAR_HIGH
)
597 func(&vcpu
->arch
.fpscr
, &ps1_out
, &ps1_in1
, &ps1_in2
);
599 if (!(scalar
& SCALAR_NO_PS1
)) {
600 qpr
[reg_out
] = ps1_out
;
602 dprintk(KERN_INFO
"PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
603 ps1_in1
, ps1_in2
, qpr
[reg_out
]);
609 static int kvmppc_ps_one_in(struct kvm_vcpu
*vcpu
, bool rc
,
610 int reg_out
, int reg_in
,
612 u32
*dst
, u32
*src1
))
614 u32
*qpr
= vcpu
->arch
.qpr
;
615 u64
*fpr
= vcpu
->arch
.fpr
;
623 kvm_cvt_df(&fpr
[reg_in
], &ps0_in
);
624 func(&vcpu
->arch
.fpscr
, &ps0_out
, &ps0_in
);
626 dprintk(KERN_INFO
"PS1 ps0 -> f(0x%x) = 0x%x\n",
629 kvm_cvt_fd(&ps0_out
, &fpr
[reg_out
]);
632 ps1_in
= qpr
[reg_in
];
633 func(&vcpu
->arch
.fpscr
, &qpr
[reg_out
], &ps1_in
);
635 dprintk(KERN_INFO
"PS1 ps1 -> f(0x%x) = 0x%x\n",
636 ps1_in
, qpr
[reg_out
]);
641 int kvmppc_emulate_paired_single(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
)
643 u32 inst
= kvmppc_get_last_inst(vcpu
);
644 enum emulation_result emulated
= EMULATE_DONE
;
646 int ax_rd
= inst_get_field(inst
, 6, 10);
647 int ax_ra
= inst_get_field(inst
, 11, 15);
648 int ax_rb
= inst_get_field(inst
, 16, 20);
649 int ax_rc
= inst_get_field(inst
, 21, 25);
650 short full_d
= inst_get_field(inst
, 16, 31);
652 u64
*fpr_d
= &vcpu
->arch
.fpr
[ax_rd
];
653 u64
*fpr_a
= &vcpu
->arch
.fpr
[ax_ra
];
654 u64
*fpr_b
= &vcpu
->arch
.fpr
[ax_rb
];
655 u64
*fpr_c
= &vcpu
->arch
.fpr
[ax_rc
];
657 bool rcomp
= (inst
& 1) ? true : false;
658 u32 cr
= kvmppc_get_cr(vcpu
);
663 if (!kvmppc_inst_is_paired_single(vcpu
, inst
))
666 if (!(vcpu
->arch
.shared
->msr
& MSR_FP
)) {
667 kvmppc_book3s_queue_irqprio(vcpu
, BOOK3S_INTERRUPT_FP_UNAVAIL
);
668 return EMULATE_AGAIN
;
671 kvmppc_giveup_ext(vcpu
, MSR_FP
);
674 /* Do we need to clear FE0 / FE1 here? Don't think so. */
677 for (i
= 0; i
< ARRAY_SIZE(vcpu
->arch
.fpr
); i
++) {
679 kvm_cvt_df(&vcpu
->arch
.fpr
[i
], &f
);
680 dprintk(KERN_INFO
"FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
681 i
, f
, vcpu
->arch
.fpr
[i
], i
, vcpu
->arch
.qpr
[i
]);
685 switch (get_op(inst
)) {
688 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
689 bool w
= inst_get_field(inst
, 16, 16) ? true : false;
690 int i
= inst_get_field(inst
, 17, 19);
692 addr
+= get_d_signext(inst
);
693 emulated
= kvmppc_emulate_psq_load(run
, vcpu
, ax_rd
, addr
, w
, i
);
698 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
);
699 bool w
= inst_get_field(inst
, 16, 16) ? true : false;
700 int i
= inst_get_field(inst
, 17, 19);
702 addr
+= get_d_signext(inst
);
703 emulated
= kvmppc_emulate_psq_load(run
, vcpu
, ax_rd
, addr
, w
, i
);
705 if (emulated
== EMULATE_DONE
)
706 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
711 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
712 bool w
= inst_get_field(inst
, 16, 16) ? true : false;
713 int i
= inst_get_field(inst
, 17, 19);
715 addr
+= get_d_signext(inst
);
716 emulated
= kvmppc_emulate_psq_store(run
, vcpu
, ax_rd
, addr
, w
, i
);
721 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
);
722 bool w
= inst_get_field(inst
, 16, 16) ? true : false;
723 int i
= inst_get_field(inst
, 17, 19);
725 addr
+= get_d_signext(inst
);
726 emulated
= kvmppc_emulate_psq_store(run
, vcpu
, ax_rd
, addr
, w
, i
);
728 if (emulated
== EMULATE_DONE
)
729 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
734 switch (inst_get_field(inst
, 21, 30)) {
737 emulated
= EMULATE_FAIL
;
741 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
742 bool w
= inst_get_field(inst
, 21, 21) ? true : false;
743 int i
= inst_get_field(inst
, 22, 24);
745 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
746 emulated
= kvmppc_emulate_psq_load(run
, vcpu
, ax_rd
, addr
, w
, i
);
751 emulated
= EMULATE_FAIL
;
755 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
);
756 bool w
= inst_get_field(inst
, 21, 21) ? true : false;
757 int i
= inst_get_field(inst
, 22, 24);
759 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
760 emulated
= kvmppc_emulate_psq_load(run
, vcpu
, ax_rd
, addr
, w
, i
);
762 if (emulated
== EMULATE_DONE
)
763 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
767 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rb
];
768 vcpu
->arch
.fpr
[ax_rd
] ^= 0x8000000000000000ULL
;
769 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
770 vcpu
->arch
.qpr
[ax_rd
] ^= 0x80000000;
774 emulated
= EMULATE_FAIL
;
778 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rb
];
779 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
783 emulated
= EMULATE_FAIL
;
787 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rb
];
788 vcpu
->arch
.fpr
[ax_rd
] |= 0x8000000000000000ULL
;
789 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
790 vcpu
->arch
.qpr
[ax_rd
] |= 0x80000000;
794 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rb
];
795 vcpu
->arch
.fpr
[ax_rd
] &= ~0x8000000000000000ULL
;
796 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
797 vcpu
->arch
.qpr
[ax_rd
] &= ~0x80000000;
799 case OP_4X_PS_MERGE00
:
801 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_ra
];
802 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
803 kvm_cvt_df(&vcpu
->arch
.fpr
[ax_rb
],
804 &vcpu
->arch
.qpr
[ax_rd
]);
806 case OP_4X_PS_MERGE01
:
808 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_ra
];
809 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
811 case OP_4X_PS_MERGE10
:
813 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
814 kvm_cvt_fd(&vcpu
->arch
.qpr
[ax_ra
],
815 &vcpu
->arch
.fpr
[ax_rd
]);
816 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
817 kvm_cvt_df(&vcpu
->arch
.fpr
[ax_rb
],
818 &vcpu
->arch
.qpr
[ax_rd
]);
820 case OP_4X_PS_MERGE11
:
822 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
823 kvm_cvt_fd(&vcpu
->arch
.qpr
[ax_ra
],
824 &vcpu
->arch
.fpr
[ax_rd
]);
825 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
829 switch (inst_get_field(inst
, 25, 30)) {
832 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
833 bool w
= inst_get_field(inst
, 21, 21) ? true : false;
834 int i
= inst_get_field(inst
, 22, 24);
836 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
837 emulated
= kvmppc_emulate_psq_store(run
, vcpu
, ax_rd
, addr
, w
, i
);
840 case OP_4XW_PSQ_STUX
:
842 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
);
843 bool w
= inst_get_field(inst
, 21, 21) ? true : false;
844 int i
= inst_get_field(inst
, 22, 24);
846 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
847 emulated
= kvmppc_emulate_psq_store(run
, vcpu
, ax_rd
, addr
, w
, i
);
849 if (emulated
== EMULATE_DONE
)
850 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
855 switch (inst_get_field(inst
, 26, 30)) {
857 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
858 ax_rb
, ax_ra
, SCALAR_NO_PS0
| SCALAR_HIGH
, fps_fadds
);
859 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rc
];
862 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
863 ax_ra
, ax_rb
, SCALAR_NO_PS1
| SCALAR_LOW
, fps_fadds
);
864 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rc
];
867 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
868 ax_ra
, ax_rc
, SCALAR_HIGH
, fps_fmuls
);
871 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
872 ax_ra
, ax_rc
, SCALAR_LOW
, fps_fmuls
);
874 case OP_4A_PS_MADDS0
:
875 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
876 ax_ra
, ax_rc
, ax_rb
, SCALAR_HIGH
, fps_fmadds
);
878 case OP_4A_PS_MADDS1
:
879 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
880 ax_ra
, ax_rc
, ax_rb
, SCALAR_LOW
, fps_fmadds
);
883 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
884 ax_ra
, ax_rb
, SCALAR_NONE
, fps_fdivs
);
887 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
888 ax_ra
, ax_rb
, SCALAR_NONE
, fps_fsubs
);
891 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
892 ax_ra
, ax_rb
, SCALAR_NONE
, fps_fadds
);
895 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
896 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fsel
);
899 emulated
= kvmppc_ps_one_in(vcpu
, rcomp
, ax_rd
,
903 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
904 ax_ra
, ax_rc
, SCALAR_NONE
, fps_fmuls
);
906 case OP_4A_PS_RSQRTE
:
907 emulated
= kvmppc_ps_one_in(vcpu
, rcomp
, ax_rd
,
911 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
912 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fmsubs
);
915 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
916 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fmadds
);
919 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
920 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fnmsubs
);
923 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
924 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fnmadds
);
929 /* Real FPU operations */
933 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) + full_d
;
935 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
, addr
,
941 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) + full_d
;
943 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
, addr
,
946 if (emulated
== EMULATE_DONE
)
947 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
952 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) + full_d
;
954 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
, addr
,
960 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) + full_d
;
962 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
, addr
,
965 if (emulated
== EMULATE_DONE
)
966 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
971 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) + full_d
;
973 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
, addr
,
979 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) + full_d
;
981 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
, addr
,
984 if (emulated
== EMULATE_DONE
)
985 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
990 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) + full_d
;
992 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
, addr
,
998 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) + full_d
;
1000 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
, addr
,
1003 if (emulated
== EMULATE_DONE
)
1004 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1008 switch (inst_get_field(inst
, 21, 30)) {
1011 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
1013 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
1014 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
,
1015 addr
, FPU_LS_SINGLE
);
1020 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) +
1021 kvmppc_get_gpr(vcpu
, ax_rb
);
1023 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
,
1024 addr
, FPU_LS_SINGLE
);
1026 if (emulated
== EMULATE_DONE
)
1027 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1032 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) +
1033 kvmppc_get_gpr(vcpu
, ax_rb
);
1035 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
,
1036 addr
, FPU_LS_DOUBLE
);
1041 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) +
1042 kvmppc_get_gpr(vcpu
, ax_rb
);
1044 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
,
1045 addr
, FPU_LS_DOUBLE
);
1047 if (emulated
== EMULATE_DONE
)
1048 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1053 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) +
1054 kvmppc_get_gpr(vcpu
, ax_rb
);
1056 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1057 addr
, FPU_LS_SINGLE
);
1062 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) +
1063 kvmppc_get_gpr(vcpu
, ax_rb
);
1065 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1066 addr
, FPU_LS_SINGLE
);
1068 if (emulated
== EMULATE_DONE
)
1069 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1074 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) +
1075 kvmppc_get_gpr(vcpu
, ax_rb
);
1077 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1078 addr
, FPU_LS_DOUBLE
);
1083 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) +
1084 kvmppc_get_gpr(vcpu
, ax_rb
);
1086 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1087 addr
, FPU_LS_DOUBLE
);
1089 if (emulated
== EMULATE_DONE
)
1090 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1095 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) +
1096 kvmppc_get_gpr(vcpu
, ax_rb
);
1098 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1107 switch (inst_get_field(inst
, 21, 30)) {
1109 fpd_fadds(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1110 kvmppc_sync_qpr(vcpu
, ax_rd
);
1113 fpd_fsubs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1114 kvmppc_sync_qpr(vcpu
, ax_rd
);
1117 fpd_fdivs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1118 kvmppc_sync_qpr(vcpu
, ax_rd
);
1121 fpd_fres(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1122 kvmppc_sync_qpr(vcpu
, ax_rd
);
1124 case OP_59_FRSQRTES
:
1125 fpd_frsqrtes(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1126 kvmppc_sync_qpr(vcpu
, ax_rd
);
1129 switch (inst_get_field(inst
, 26, 30)) {
1131 fpd_fmuls(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
);
1132 kvmppc_sync_qpr(vcpu
, ax_rd
);
1135 fpd_fmsubs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1136 kvmppc_sync_qpr(vcpu
, ax_rd
);
1139 fpd_fmadds(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1140 kvmppc_sync_qpr(vcpu
, ax_rd
);
1143 fpd_fnmsubs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1144 kvmppc_sync_qpr(vcpu
, ax_rd
);
1147 fpd_fnmadds(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1148 kvmppc_sync_qpr(vcpu
, ax_rd
);
1153 switch (inst_get_field(inst
, 21, 30)) {
1158 /* XXX need to implement */
1161 /* XXX missing CR */
1162 *fpr_d
= vcpu
->arch
.fpscr
;
1165 /* XXX missing fm bits */
1166 /* XXX missing CR */
1167 vcpu
->arch
.fpscr
= *fpr_b
;
1172 u32 cr0_mask
= 0xf0000000;
1173 u32 cr_shift
= inst_get_field(inst
, 6, 8) * 4;
1175 fpd_fcmpu(&vcpu
->arch
.fpscr
, &tmp_cr
, fpr_a
, fpr_b
);
1176 cr
&= ~(cr0_mask
>> cr_shift
);
1177 cr
|= (cr
& cr0_mask
) >> cr_shift
;
1183 u32 cr0_mask
= 0xf0000000;
1184 u32 cr_shift
= inst_get_field(inst
, 6, 8) * 4;
1186 fpd_fcmpo(&vcpu
->arch
.fpscr
, &tmp_cr
, fpr_a
, fpr_b
);
1187 cr
&= ~(cr0_mask
>> cr_shift
);
1188 cr
|= (cr
& cr0_mask
) >> cr_shift
;
1192 fpd_fneg(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1198 fpd_fabs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1201 fpd_fcpsgn(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1204 fpd_fdiv(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1207 fpd_fadd(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1210 fpd_fsub(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1213 fpd_fctiw(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1216 fpd_fctiwz(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1219 fpd_frsp(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1220 kvmppc_sync_qpr(vcpu
, ax_rd
);
1227 fpd_fsqrt(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1228 /* fD = 1.0f / fD */
1229 fpd_fdiv(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, (u64
*)&one
, fpr_d
);
1233 switch (inst_get_field(inst
, 26, 30)) {
1235 fpd_fmul(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
);
1238 fpd_fsel(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1241 fpd_fmsub(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1244 fpd_fmadd(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1247 fpd_fnmsub(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1250 fpd_fnmadd(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1257 for (i
= 0; i
< ARRAY_SIZE(vcpu
->arch
.fpr
); i
++) {
1259 kvm_cvt_df(&vcpu
->arch
.fpr
[i
], &f
);
1260 dprintk(KERN_INFO
"FPR[%d] = 0x%x\n", i
, f
);
1265 kvmppc_set_cr(vcpu
, cr
);