qemu-kvm: Drop redundant cpuid filtering from cpu_x86_cpuid
[qemu-kvm.git] / cpu-exec.c
blob 5c156ac3a0d75101cecc28f0302a720acee6194b
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "qemu-barrier.h"
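/* Set by tb_gen_code() when generating a block invalidated existing TBs
   (e.g. because the translation buffer had to be flushed); cpu_exec()
   checks it so the previous block is not chained to a stale TB. */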
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
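/* Enabling CONFIG_DEBUG_EXEC above turns on the per-TB trace logging
   (the CPU_LOG_EXEC qemu_log_mask() call) in the main loop below. */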
bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(CPUState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
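/* Slow-path TB lookup: walk the physical-PC hash chain (matching cs_base,
   flags and, for blocks spanning two pages, the second page as well),
   translate a new block on a miss, and record the result in the
   virtual-PC jump cache used by tb_find_fast(). */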
static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
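/* Fast-path TB lookup: hash the current virtual PC into tb_jmp_cache and
   fall back to tb_find_slow() when the cached entry does not match the
   current pc/cs_base/flags. */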
static inline TranslationBlock *tb_find_fast(CPUState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
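/* Runs when EXCP_DEBUG terminates the execution loop: clear stale
   BP_WATCHPOINT_HIT flags if no watchpoint actually fired, then invoke
   the handler registered via cpu_set_debug_excp_handler(), if any. */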
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;
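/* Set asynchronously (it is a volatile sig_atomic_t, so typically from a
   signal handler) to request that the CPU leave its execution loop; it is
   mirrored into env->exit_request on entry to cpu_exec() and checked
   before each TB is run. */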
int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
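            /* The low two bits of the value returned by tcg_qemu_tb_exec()
               in next_tb are a tag: 0/1 name the jump slot of the TB that
               just exited (used below to patch a direct jump with
               tb_add_jump()), while 2 means the block did not run to
               completion, e.g. the instruction counter expired, so the PC
               has to be restored from the TB. */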
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
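                /* tb_lock protects the TB hash tables and block chaining
                   while we look up (and possibly generate or patch) TBs. */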
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
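                        /* The instruction-count budget in icount_decr ran
                           out.  If icount_extra still holds budget, move up
                           to 0xffff of it back into the 16-bit decrementer
                           and re-enter the loop; otherwise execute the few
                           remaining instructions without caching and leave
                           the loop with EXCP_INTERRUPT. */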
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}