/*
 * arch/ia64/vmx/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 */

#include <asm/asmmacro.h>
#include <asm/processor.h>

#include "asm-offsets.h"

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_MOV_TO_PSR
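
// Fast path for mov-from-AR: the loads below pick up the per-vcpu ITC offset
// and last-seen ITC value used when the guest reads ar.itc.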
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
	br.many kvm_virtualization_fault_back
#endif
	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21	// &vcpu->arch.itc_offset
	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21	// &vcpu->arch.last_itc
	addl r20=@gprel(asm_mov_to_reg),gp
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
END(kvm_asm_mov_from_ar)
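
// mov r1=rr[r3]: read a virtual region register from vcpu->arch.vrr[].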
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
	br.many kvm_virtualization_fault_back
#endif
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
	add r27=VMM_VCPU_VRR0_OFFSET,r21	// &vcpu->arch.vrr[0]
kvm_asm_mov_from_rr_back_1:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
END(kvm_asm_mov_from_rr)
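
// mov rr[r3]=r2: write a virtual region register; writes to rr0/rr4 also
// update the saved META_RR0 copies used for the guest's physical-mode mapping.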
GLOBAL_ENTRY(kvm_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
	br.many kvm_virtualization_fault_back
#endif
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
	add r27=VMM_VCPU_VRR0_OFFSET,r21	// &vcpu->arch.vrr[0]
kvm_asm_mov_to_rr_back_1:
	adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
kvm_asm_mov_to_rr_back_2:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	;;	// vrr.rid<<4 | 0xe
	shladd r16 = r16, 4, r17
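	// Per the note above, this forms the host rid as (guest vrr.rid << 4) | 0xe:
	// r16 holds the guest rid and r17 is assumed to hold the constant 0xe,
	// loaded in code elided above.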
	(p6) dep r19=r18,r19,2,6
	cmp.eq.or p6,p0=4,r23			// region number == 4?
	adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21	// &vcpu->arch.mode_flags
	(p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	(p6) shladd r17=r23,1,r17
	(p6) tbit.nz p6,p7=r16,0
END(kvm_asm_mov_to_rr)
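
// rsm imm24: clear the requested bits in the guest's virtual PSR (vpd->vpsr),
// then return through kvm_resume_to_guest.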
GLOBAL_ENTRY(kvm_asm_rsm)
	br.many kvm_virtualization_fault_back
	add r16=VMM_VPD_BASE_OFFSET,r21
	add r17=VPD_VPSR_START_OFFSET,r16
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
	/* Commented out due to lack of fp lazy algorithm support
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	*/
	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
	(p6) br.dptk kvm_resume_to_guest
	add r26=VMM_VCPU_META_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	br.many kvm_resume_to_guest
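
// ssm imm24: set the requested bits in the guest's virtual PSR; if enabling
// psr.i exposes a pending virtual interrupt (vhpi vs. vtpr check below),
// dispatch it via kvm_asm_dispatch_vexirq.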
GLOBAL_ENTRY(kvm_asm_ssm)
	br.many kvm_virtualization_fault_back
	add r16=VMM_VPD_BASE_OFFSET,r21
	add r27=VPD_VPSR_START_OFFSET,r16
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	cmp.ne.or p6,p0=r28,r19
	(p6) br.dptk kvm_asm_ssm_1
	add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
	(p6) br.dptk kvm_resume_to_guest
	add r29=VPD_VTPR_START_OFFSET,r16	// &vpd->vtpr
	add r30=VPD_VHPI_START_OFFSET,r16	// &vpd->vhpi
	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest
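
// mov psr.l=r2: load the low word of the guest's virtual PSR from a general
// register, switching between physical and virtual addressing mode when the
// DT/RT/IT bits change.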
GLOBAL_ENTRY(kvm_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
	br.many kvm_virtualization_fault_back
#endif
	add r16=VMM_VPD_BASE_OFFSET,r21
	extr.u r26=r25,13,7			// get r2 from opcode in r25
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
	add r27=VPD_VPSR_START_OFFSET,r16
kvm_asm_mov_to_psr_back:
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	(p5) br.many kvm_asm_mov_to_psr_1
	// virtual to physical
	(p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
	(p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	(p7) dep r23=-1,r23,0,1
	// physical to virtual
	(p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	(p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	(p6) dep r23=0,r23,0,1
kvm_asm_mov_to_psr_1:
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
	/* Commented out due to lack of fp lazy algorithm support
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	*/
	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
	(p6) br.dpnt.few kvm_resume_to_guest
	add r29=VPD_VTPR_START_OFFSET,r16	// &vpd->vtpr
	add r30=VPD_VHPI_START_OFFSET,r16	// &vpd->vhpi
	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest
END(kvm_asm_mov_to_psr)
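
/*
 * kvm_asm_dispatch_vexirq: before handing the virtual external interrupt to
 * kvm_dispatch_vexirq, step the guest past the emulated instruction by
 * advancing ipsr.ri through slots 0 -> 1 -> 2; when slot 2 wraps, ri is
 * cleared and iip moves to the next 16-byte bundle.
 */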
ENTRY(kvm_asm_dispatch_vexirq)
	extr.u r17=r16,IA64_PSR_RI_BIT,2	// r17 = current ipsr.ri slot
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1	// p6 = (slot == 2)
	(p6) add r18=0x10,r18			// slot wrapped: advance iip one bundle
	dep r16=r17,r16,IA64_PSR_RI_BIT,2	// write the updated slot into ipsr.ri
	br.many kvm_dispatch_vexirq
END(kvm_asm_dispatch_vexirq)
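
/*
 * kvm_asm_thash: emulate the guest thash instruction.  From the comments in
 * the code below, the long-format VHPT address is built roughly as:
 *   pval = (vaddr & VRN_MASK)
 *        | (((pta << 3) >> (pta.size + 3)) << pta.size)
 *        | (((vaddr >> rr.ps) << 3) & ((1UL << pta.size) - 1));
 */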
// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(kvm_asm_thash)
	br.many kvm_virtualization_fault_back
	extr.u r17=r25,20,7			// get r3 from opcode in r25
	extr.u r18=r25,6,7			// get r1 from opcode in r25
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
	shladd r17=r17,4,r20			// get addr of MOVE_FROM_REG(r17)
	adds r16=VMM_VPD_BASE_OFFSET,r21	// get vcpu.arch.priveregs
	ld8 r16=[r16]				// get VPD addr
	br.many b0				// r19 return value
	shr.u r23=r19,61			// get RR number
	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
	shladd r27=r23,3,r25			// get vcpu->arch.vrr[r23]'s addr
	ld8 r17=[r16]				// get PTA
	extr.u r29=r17,2,6			// get pta.size
	ld8 r25=[r27]				// get vcpu->arch.vrr[r23]'s value
	extr.u r25=r25,2,6			// get rr.ps
	shl r22=r26,r29				// 1UL << pta.size
	shr.u r23=r19,r25			// vaddr >> rr.ps
	adds r26=3,r29				// pta.size + 3
	shl r27=r17,3				// pta << 3
	shl r23=r23,3				// (vaddr >> rr.ps) << 3
	shr.u r27=r27,r26			// (pta << 3) >> (pta.size + 3)
	adds r22=-1,r22				// (1UL << pta.size) - 1
	shl r27=r27,r29				// ((pta << 3) >> (pta.size + 3)) << pta.size
	and r19=r19,r16				// vaddr & VRN_MASK
	and r22=r22,r23				// vhpt_offset
	or r19=r19,r27				// (vaddr & VRN_MASK) | (((pta << 3) >> (pta.size + 3)) << pta.size)
	adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
	or r19=r19,r22				// calc pval
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
#define MOV_TO_REG0	\

#define MOV_TO_REG(n)	\

#define MOV_FROM_REG(n)	\

#define MOV_TO_BANK0_REG(n)				\
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##);		\
END(asm_mov_to_bank0_reg##n##)

#define MOV_FROM_BANK0_REG(n)				\
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##);		\
END(asm_mov_from_bank0_reg##n##)

#define JMP_TO_MOV_TO_BANK0_REG(n)			\
	br.sptk.many asm_mov_to_bank0_reg##n##;		\

#define JMP_TO_MOV_FROM_BANK0_REG(n)			\
	br.sptk.many asm_mov_from_bank0_reg##n##;	\
MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)
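
/*
 * Register access is dispatched through jump tables: each entry below is one
 * 16-byte bundle, so callers reach the handler for register n by branching to
 * table_base + n*16 (see the shladd-by-4 address calculations above).
 */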
// mov from reg table
ENTRY(asm_mov_from_reg)
	JMP_TO_MOV_FROM_BANK0_REG(16)
	JMP_TO_MOV_FROM_BANK0_REG(17)
	JMP_TO_MOV_FROM_BANK0_REG(18)
	JMP_TO_MOV_FROM_BANK0_REG(19)
	JMP_TO_MOV_FROM_BANK0_REG(20)
	JMP_TO_MOV_FROM_BANK0_REG(21)
	JMP_TO_MOV_FROM_BANK0_REG(22)
	JMP_TO_MOV_FROM_BANK0_REG(23)
	JMP_TO_MOV_FROM_BANK0_REG(24)
	JMP_TO_MOV_FROM_BANK0_REG(25)
	JMP_TO_MOV_FROM_BANK0_REG(26)
	JMP_TO_MOV_FROM_BANK0_REG(27)
	JMP_TO_MOV_FROM_BANK0_REG(28)
	JMP_TO_MOV_FROM_BANK0_REG(29)
	JMP_TO_MOV_FROM_BANK0_REG(30)
	JMP_TO_MOV_FROM_BANK0_REG(31)
END(asm_mov_from_reg)
ENTRY(kvm_resume_to_guest)
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
	adds r19=VMM_VPD_BASE_OFFSET,r21
	extr.u r17=r16,IA64_PSR_RI_BIT,2	// r17 = current ipsr.ri slot
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1	// p6 = (slot == 2)
	(p6) add r18=0x10,r18			// slot wrapped: advance iip one bundle
	dep r16=r17,r16,IA64_PSR_RI_BIT,2	// write the updated slot into ipsr.ri
	adds r19= VPD_VPSR_START_OFFSET,r25
	add r28=PAL_VPS_RESUME_NORMAL,r20
	add r29=PAL_VPS_RESUME_HANDLER,r20
	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT	// p6 = !vpsr.ic, p7 = vpsr.ic
	br.sptk.many b0				// call pal service
END(kvm_resume_to_guest)
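
// mov to reg table (entries 16-31 forward to the bank0 handlers)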
ENTRY(asm_mov_to_reg)
	JMP_TO_MOV_TO_BANK0_REG(16)
	JMP_TO_MOV_TO_BANK0_REG(17)
	JMP_TO_MOV_TO_BANK0_REG(18)
	JMP_TO_MOV_TO_BANK0_REG(19)
	JMP_TO_MOV_TO_BANK0_REG(20)
	JMP_TO_MOV_TO_BANK0_REG(21)
	JMP_TO_MOV_TO_BANK0_REG(22)
	JMP_TO_MOV_TO_BANK0_REG(23)
	JMP_TO_MOV_TO_BANK0_REG(24)
	JMP_TO_MOV_TO_BANK0_REG(25)
	JMP_TO_MOV_TO_BANK0_REG(26)
	JMP_TO_MOV_TO_BANK0_REG(27)
	JMP_TO_MOV_TO_BANK0_REG(28)
	JMP_TO_MOV_TO_BANK0_REG(29)
	JMP_TO_MOV_TO_BANK0_REG(30)
	JMP_TO_MOV_TO_BANK0_REG(31)