#include "config-host.h"

extern int kvm_allowed;
extern int kvm_irqchip;

#include <sys/utsname.h>

#define MSR_IA32_TSC 0x10

static struct kvm_msr_list *kvm_msr_list;
extern unsigned int kvm_shadow_memory;
extern kvm_context_t kvm_context;
static int kvm_has_msr_star;

static int lm_capable_kernel;
extern __thread CPUState *vcpu_env;
int kvm_arch_qemu_create_context(void)
{
    int i;

    if (kvm_shadow_memory)
        kvm_set_shadow_pages(kvm_context, kvm_shadow_memory);

    kvm_msr_list = kvm_get_msr_list(kvm_context);

    /* Probe the MSR list exposed by the kernel so that MSR_STAR is only
     * saved and restored when the host actually supports it. */
    for (i = 0; i < kvm_msr_list->nmsrs; ++i)
        if (kvm_msr_list->indices[i] == MSR_STAR)
            kvm_has_msr_star = 1;

    return 0;
}
static void set_msr_entry(struct kvm_msr_entry *entry, uint32_t index,
                          uint64_t data)
{
    entry->index = index;
    entry->data = data;
}
/* returns 0 on success, non-0 on failure */
static int get_msr_entry(struct kvm_msr_entry *entry, CPUState *env)
{
    switch (entry->index) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = entry->data;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = entry->data;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = entry->data;
        break;
    case MSR_STAR:
        env->star = entry->data;
        break;
    case MSR_CSTAR:
        env->cstar = entry->data;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = entry->data;
        break;
    case MSR_FMASK:
        env->fmask = entry->data;
        break;
    case MSR_LSTAR:
        env->lstar = entry->data;
        break;
    case MSR_IA32_TSC:
        env->tsc = entry->data;
        break;
    default:
        printf("Warning unknown msr index 0x%x\n", entry->index);
        return 1;
    }
    return 0;
}
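/*
 * The helpers below translate between QEMU's packed segment-descriptor
 * representation (SegmentCache, with the attribute bits encoded in .flags
 * using the DESC_*_SHIFT/DESC_*_MASK layout) and KVM's unpacked struct
 * kvm_segment.  set_v8086_seg() builds a vm86-mode segment, set_seg()
 * converts QEMU -> KVM, and get_seg() converts back; note that set_seg()
 * derives the DPL from the selector's RPL rather than from the flags word.
 */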
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
/* The reset values of QEMU are not compatible with SVM;
 * this function is used to fix up the segment descriptor values. */
static void fix_realmode_dataseg(struct kvm_segment *seg)
void kvm_arch_load_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    int rc, n, i;
    regs.rax = env->regs[R_EAX];
    regs.rbx = env->regs[R_EBX];
    regs.rcx = env->regs[R_ECX];
    regs.rdx = env->regs[R_EDX];
    regs.rsi = env->regs[R_ESI];
    regs.rdi = env->regs[R_EDI];
    regs.rsp = env->regs[R_ESP];
    regs.rbp = env->regs[R_EBP];

    regs.r8 = env->regs[8];
    regs.r9 = env->regs[9];
    regs.r10 = env->regs[10];
    regs.r11 = env->regs[11];
    regs.r12 = env->regs[12];
    regs.r13 = env->regs[13];
    regs.r14 = env->regs[14];
    regs.r15 = env->regs[15];

    regs.rflags = env->eflags;

    kvm_set_regs(kvm_context, env->cpu_index, &regs);
    memset(&fpu, 0, sizeof fpu);
    /* the FPU stack top (fpstt) lives in bits 11..13 of the status word */
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;
    kvm_set_fpu(kvm_context, env->cpu_index, &fpu);
    memcpy(sregs.interrupt_bitmap, env->kvm_interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }

        if (!(env->cr[0] & CR0_PG_MASK)) {
            fix_realmode_dataseg(&sregs.cs);
            fix_realmode_dataseg(&sregs.ds);
            fix_realmode_dataseg(&sregs.es);
            fix_realmode_dataseg(&sregs.fs);
            fix_realmode_dataseg(&sregs.gs);
            fix_realmode_dataseg(&sregs.ss);
        }
    }
    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.apic_base = cpu_get_apic_base(env);
    sregs.efer = env->efer;
    sregs.cr8 = cpu_get_apic_tpr(env);

    kvm_set_sregs(kvm_context, env->cpu_index, &sregs);
    n = 0;
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star)
        set_msr_entry(&msrs[n++], MSR_STAR, env->star);
    set_msr_entry(&msrs[n++], MSR_IA32_TSC, env->tsc);

    if (lm_capable_kernel) {
        set_msr_entry(&msrs[n++], MSR_CSTAR, env->cstar);
        set_msr_entry(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        set_msr_entry(&msrs[n++], MSR_FMASK, env->fmask);
        set_msr_entry(&msrs[n++], MSR_LSTAR, env->lstar);
    }

    rc = kvm_set_msrs(kvm_context, env->cpu_index, msrs, n);
    if (rc == -1)
        perror("kvm_set_msrs FAILED");
}
void kvm_arch_save_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    uint32_t hflags;
    int rc, n, i;
    kvm_get_regs(kvm_context, env->cpu_index, &regs);

    env->regs[R_EAX] = regs.rax;
    env->regs[R_EBX] = regs.rbx;
    env->regs[R_ECX] = regs.rcx;
    env->regs[R_EDX] = regs.rdx;
    env->regs[R_ESI] = regs.rsi;
    env->regs[R_EDI] = regs.rdi;
    env->regs[R_ESP] = regs.rsp;
    env->regs[R_EBP] = regs.rbp;

    env->regs[8] = regs.r8;
    env->regs[9] = regs.r9;
    env->regs[10] = regs.r10;
    env->regs[11] = regs.r11;
    env->regs[12] = regs.r12;
    env->regs[13] = regs.r13;
    env->regs[14] = regs.r14;
    env->regs[15] = regs.r15;

    env->eflags = regs.rflags;
    kvm_get_fpu(kvm_context, env->cpu_index, &fpu);
    env->fpstt = (fpu.fsw >> 11) & 7;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;
    kvm_get_sregs(kvm_context, env->cpu_index, &sregs);

    memcpy(env->kvm_interrupt_bitmap, sregs.interrupt_bitmap,
           sizeof(env->kvm_interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);
#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
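    /*
     * Recompute env->hflags from the control registers and segment state
     * just read back from KVM.  QEMU normally keeps these derived flags up
     * to date itself (e.g. in cpu_x86_load_seg_cache()); since the state
     * was loaded behind its back here, they are rebuilt by hand while the
     * bits excluded by HFLAG_COPY_MASK above are preserved.
     */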
    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
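    /*
     * QEMU keeps the guest's arithmetic flags lazily in cc_src/cc_op and the
     * direction flag in env->df, so the flags that came back from KVM in
     * eflags are split out here and then masked out of env->eflags.
     */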
    env->cc_src = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    env->cc_op = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star)
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;

    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }

    rc = kvm_get_msrs(kvm_context, env->cpu_index, msrs, n);
    if (rc == -1) {
        perror("kvm_get_msrs FAILED");
    } else {
        n = rc; /* actual number of MSRs */
        for (i = 0; i < n; i++) {
            if (get_msr_entry(&msrs[i], env))
                return;
        }
    }
}
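/*
 * host_cpuid() runs the CPUID instruction on the host.  Instead of listing
 * clobbers, the asm below saves and restores every register it touches (and
 * skips the x86-64 red zone), then the results are written back through
 * whichever of the eax/ebx/ecx/edx pointers are non-NULL.
 */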
static void host_cpuid(uint32_t function, uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

    vec[0] = function;
    asm volatile (
#ifdef __x86_64__
        "sub $128, %%rsp \n\t"  /* skip red zone */
        "push %0; push %%rsi \n\t"
        "push %%rax; push %%rbx; push %%rcx; push %%rdx \n\t"
        "mov 8*5(%%rsp), %%rsi \n\t"
        "mov (%%rsi), %%eax \n\t"
        "cpuid \n\t"
        "mov %%eax, (%%rsi) \n\t"
        "mov %%ebx, 4(%%rsi) \n\t"
        "mov %%ecx, 8(%%rsi) \n\t"
        "mov %%edx, 12(%%rsi) \n\t"
        "pop %%rdx; pop %%rcx; pop %%rbx; pop %%rax \n\t"
        "pop %%rsi; pop %0 \n\t"
        "add $128, %%rsp"
#else
        "push %0; push %%esi \n\t"
        "push %%eax; push %%ebx; push %%ecx; push %%edx \n\t"
        "mov 4*5(%%esp), %%esi \n\t"
        "mov (%%esi), %%eax \n\t"
        "cpuid \n\t"
        "mov %%eax, (%%esi) \n\t"
        "mov %%ebx, 4(%%esi) \n\t"
        "mov %%ecx, 8(%%esi) \n\t"
        "mov %%edx, 12(%%esi) \n\t"
        "pop %%edx; pop %%ecx; pop %%ebx; pop %%eax \n\t"
        "pop %%esi; pop %0 \n\t"
#endif
        : : "rm"(vec) : "memory");
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
static void do_cpuid_ent(struct kvm_cpuid_entry *e, uint32_t function,
                         CPUState *env)
{
    env->regs[R_EAX] = function;
    qemu_kvm_cpuid_on_env(env);
    e->function = function;
    e->eax = env->regs[R_EAX];
    e->ebx = env->regs[R_EBX];
    e->ecx = env->regs[R_ECX];
    e->edx = env->regs[R_EDX];
    if (function == 0x80000001) {
        uint32_t h_eax, h_edx;
        struct utsname utsname;

        host_cpuid(function, &h_eax, NULL, NULL, &h_edx);
        uname(&utsname);
        lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

        /* long mode (bit 29): only if both the host CPU and kernel are 64-bit */
        if ((h_edx & 0x20000000) == 0 || !lm_capable_kernel)
            e->edx &= ~0x20000000u;
        /* syscall (bit 11): only if supported by the host */
        if ((h_edx & 0x00000800) == 0)
            e->edx &= ~0x00000800u;
        /* nx (bit 20): only if supported by the host */
        if ((h_edx & 0x00100000) == 0)
            e->edx &= ~0x00100000u;
    }
    // sysenter isn't supported in compatibility mode on AMD, and syscall
    // isn't supported in compatibility mode on Intel, so advertise the
    // actual CPU vendor, and say goodbye to migration between different
    // vendors if you use compatibility mode.
    if (function == 0) {
        uint32_t bcd[3];

        host_cpuid(0, NULL, &bcd[0], &bcd[1], &bcd[2]);
        e->ebx = bcd[0];
        e->ecx = bcd[1];
        e->edx = bcd[2];
    }
}
int kvm_arch_qemu_init_env(CPUState *cenv)
{
    struct kvm_cpuid_entry cpuid_ent[100];
#ifdef KVM_CPUID_SIGNATURE
    struct kvm_cpuid_entry *pv_ent;
    uint32_t signature[3];
#endif
    int cpuid_nent = 0;
    CPUState copy;
    uint32_t limit, i;

    copy = *cenv;
#ifdef KVM_CPUID_SIGNATURE
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12); /* 12-byte signature, NUL-padded */
    pv_ent = &cpuid_ent[cpuid_nent++];
    memset(pv_ent, 0, sizeof(*pv_ent));
    pv_ent->function = KVM_CPUID_SIGNATURE;
    pv_ent->ebx = signature[0];
    pv_ent->ecx = signature[1];
    pv_ent->edx = signature[2];

    pv_ent = &cpuid_ent[cpuid_nent++];
    memset(pv_ent, 0, sizeof(*pv_ent));
    pv_ent->function = KVM_CPUID_FEATURES;
#endif
.regs
[R_EAX
] = 0;
540 qemu_kvm_cpuid_on_env(©
);
541 limit
= copy
.regs
[R_EAX
];
543 for (i
= 0; i
<= limit
; ++i
)
544 do_cpuid_ent(&cpuid_ent
[cpuid_nent
++], i
, ©
);
546 copy
.regs
[R_EAX
] = 0x80000000;
547 qemu_kvm_cpuid_on_env(©
);
548 limit
= copy
.regs
[R_EAX
];
550 for (i
= 0x80000000; i
<= limit
; ++i
)
551 do_cpuid_ent(&cpuid_ent
[cpuid_nent
++], i
, ©
);
553 kvm_setup_cpuid(kvm_context
, cenv
->cpu_index
, cpuid_nent
, cpuid_ent
);
int kvm_arch_halt(void *opaque, int vcpu)
{
    CPUState *env = cpu_single_env;

    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK))) {
        env->hflags |= HF_HALTED_MASK;
        env->exception_index = EXCP_HLT;
    }
    return 1;
}
void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
{
    CPUState *env = cpu_single_env;

    /* with a userspace irqchip, the TPR must be pushed to KVM before entry */
    if (!kvm_irqchip_in_kernel(kvm_context))
        kvm_set_cr8(kvm_context, vcpu, cpu_get_apic_tpr(env));
}
void kvm_arch_post_kvm_run(void *opaque, int vcpu)
{
    CPUState *env = vcpu_env;
    cpu_single_env = env;

    env->eflags = kvm_get_interrupt_flag(kvm_context, vcpu)
        ? env->eflags | IF_MASK : env->eflags & ~IF_MASK;
    env->ready_for_interrupt_injection
        = kvm_is_ready_for_interrupt_injection(kvm_context, vcpu);

    cpu_set_apic_tpr(env, kvm_get_cr8(kvm_context, vcpu));
    cpu_set_apic_base(env, kvm_get_apic_base(kvm_context, vcpu));
}
int kvm_arch_has_work(CPUState *env)
{
    if ((env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT)) &&
        (env->eflags & IF_MASK))
        return 1;
    return 0;
}
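/*
 * Called before entering the guest: if KVM reported that it can accept an
 * interrupt and the guest has IF set, pull the pending vector from the
 * emulated PIC and hand it to KVM for injection.  The return value tells
 * the caller whether a hard interrupt is still pending.
 */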
int kvm_arch_try_push_interrupts(void *opaque)
{
    CPUState *env = cpu_single_env;
    int r, irq;

    if (env->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            r = kvm_inject_irq(kvm_context, env->cpu_index, irq);
            if (r < 0)
                printf("cpu %d fail inject %x\n", env->cpu_index, irq);
        }
    }

    return (env->interrupt_request & CPU_INTERRUPT_HARD) != 0;
}
void kvm_arch_update_regs_for_sipi(CPUState *env)
{
    SegmentCache cs = env->segs[R_CS];

    kvm_arch_save_regs(env);
    env->segs[R_CS] = cs;
    env->eip = 0;
    kvm_arch_load_regs(env);
}
int handle_tpr_access(void *opaque, int vcpu,
                      uint64_t rip, int is_write)
{
    kvm_tpr_access_report(cpu_single_env, rip, is_write);
    return 0;
}