cpus.c
1 /*
2 * QEMU System Emulator
4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "qemu/osdep.h"
26 #include "qemu-common.h"
27 #include "qemu/config-file.h"
28 #include "migration/vmstate.h"
29 #include "monitor/monitor.h"
30 #include "qapi/error.h"
31 #include "qapi/qapi-commands-misc.h"
32 #include "qapi/qapi-events-run-state.h"
33 #include "qapi/qmp/qerror.h"
34 #include "qemu/error-report.h"
35 #include "qemu/qemu-print.h"
36 #include "sysemu/tcg.h"
37 #include "sysemu/block-backend.h"
38 #include "exec/gdbstub.h"
39 #include "sysemu/dma.h"
40 #include "sysemu/hw_accel.h"
41 #include "sysemu/kvm.h"
42 #include "sysemu/hax.h"
43 #include "sysemu/hvf.h"
44 #include "sysemu/whpx.h"
45 #include "exec/exec-all.h"
47 #include "qemu/thread.h"
48 #include "qemu/plugin.h"
49 #include "sysemu/cpus.h"
50 #include "sysemu/qtest.h"
51 #include "qemu/main-loop.h"
52 #include "qemu/option.h"
53 #include "qemu/bitmap.h"
54 #include "qemu/seqlock.h"
55 #include "qemu/guest-random.h"
56 #include "tcg/tcg.h"
57 #include "hw/nmi.h"
58 #include "sysemu/replay.h"
59 #include "sysemu/runstate.h"
60 #include "hw/boards.h"
61 #include "hw/hw.h"
63 #ifdef CONFIG_LINUX
65 #include <sys/prctl.h>
67 #ifndef PR_MCE_KILL
68 #define PR_MCE_KILL 33
69 #endif
71 #ifndef PR_MCE_KILL_SET
72 #define PR_MCE_KILL_SET 1
73 #endif
75 #ifndef PR_MCE_KILL_EARLY
76 #define PR_MCE_KILL_EARLY 1
77 #endif
79 #endif /* CONFIG_LINUX */
81 static QemuMutex qemu_global_mutex;
83 int64_t max_delay;
84 int64_t max_advance;
86 /* vcpu throttling controls */
87 static QEMUTimer *throttle_timer;
88 static unsigned int throttle_percentage;
90 #define CPU_THROTTLE_PCT_MIN 1
91 #define CPU_THROTTLE_PCT_MAX 99
92 #define CPU_THROTTLE_TIMESLICE_NS 10000000
94 bool cpu_is_stopped(CPUState *cpu)
96 return cpu->stopped || !runstate_is_running();
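/* True when the vCPU has no pending work and its thread may go to sleep. */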
99 static bool cpu_thread_is_idle(CPUState *cpu)
101 if (cpu->stop || cpu->queued_work_first) {
102 return false;
104 if (cpu_is_stopped(cpu)) {
105 return true;
107 if (!cpu->halted || cpu_has_work(cpu) ||
108 kvm_halt_in_kernel()) {
109 return false;
111 return true;
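/* True only if every vCPU thread is currently idle. */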
114 static bool all_cpu_threads_idle(void)
116 CPUState *cpu;
118 CPU_FOREACH(cpu) {
119 if (!cpu_thread_is_idle(cpu)) {
120 return false;
123 return true;
126 /***********************************************************/
127 /* guest cycle counter */
129 /* Protected by TimersState seqlock */
131 static bool icount_sleep = true;
132 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
133 #define MAX_ICOUNT_SHIFT 10
135 typedef struct TimersState {
136 /* Protected by BQL. */
137 int64_t cpu_ticks_prev;
138 int64_t cpu_ticks_offset;
140 /* Protect fields that can be respectively read outside the
141 * BQL, and written from multiple threads.
143 QemuSeqLock vm_clock_seqlock;
144 QemuSpin vm_clock_lock;
146 int16_t cpu_ticks_enabled;
148 /* Conversion factor from emulated instructions to virtual clock ticks. */
149 int16_t icount_time_shift;
151 /* Compensate for varying guest execution speed. */
152 int64_t qemu_icount_bias;
154 int64_t vm_clock_warp_start;
155 int64_t cpu_clock_offset;
157 /* Only written by TCG thread */
158 int64_t qemu_icount;
160 /* for adjusting icount */
161 QEMUTimer *icount_rt_timer;
162 QEMUTimer *icount_vm_timer;
163 QEMUTimer *icount_warp_timer;
164 } TimersState;
166 static TimersState timers_state;
167 bool mttcg_enabled;
170 /* The current number of executed instructions is based on what we
171 * originally budgeted minus the current state of the decrementing
172 * icount counters in extra/u16.low.
174 static int64_t cpu_get_icount_executed(CPUState *cpu)
176 return (cpu->icount_budget -
177 (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
181  * Update the global shared timers_state.qemu_icount to take into
182 * account executed instructions. This is done by the TCG vCPU
183 * thread so the main-loop can see time has moved forward.
185 static void cpu_update_icount_locked(CPUState *cpu)
187 int64_t executed = cpu_get_icount_executed(cpu);
188 cpu->icount_budget -= executed;
190 atomic_set_i64(&timers_state.qemu_icount,
191 timers_state.qemu_icount + executed);
195  * Update the global shared timers_state.qemu_icount to take into
196 * account executed instructions. This is done by the TCG vCPU
197 * thread so the main-loop can see time has moved forward.
199 void cpu_update_icount(CPUState *cpu)
201 seqlock_write_lock(&timers_state.vm_clock_seqlock,
202 &timers_state.vm_clock_lock);
203 cpu_update_icount_locked(cpu);
204 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
205 &timers_state.vm_clock_lock);
208 static int64_t cpu_get_icount_raw_locked(void)
210 CPUState *cpu = current_cpu;
212 if (cpu && cpu->running) {
213 if (!cpu->can_do_io) {
214 error_report("Bad icount read");
215 exit(1);
217 /* Take into account what has run */
218 cpu_update_icount_locked(cpu);
220 /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
221 return atomic_read_i64(&timers_state.qemu_icount);
224 static int64_t cpu_get_icount_locked(void)
226 int64_t icount = cpu_get_icount_raw_locked();
227 return atomic_read_i64(&timers_state.qemu_icount_bias) +
228 cpu_icount_to_ns(icount);
231 int64_t cpu_get_icount_raw(void)
233 int64_t icount;
234 unsigned start;
236 do {
237 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
238 icount = cpu_get_icount_raw_locked();
239 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
241 return icount;
244 /* Return the virtual CPU time, based on the instruction counter. */
245 int64_t cpu_get_icount(void)
247 int64_t icount;
248 unsigned start;
250 do {
251 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
252 icount = cpu_get_icount_locked();
253 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
255 return icount;
258 int64_t cpu_icount_to_ns(int64_t icount)
260 return icount << atomic_read(&timers_state.icount_time_shift);
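/* Locked variant of cpu_get_ticks(); the caller must hold vm_clock_lock. */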
263 static int64_t cpu_get_ticks_locked(void)
265 int64_t ticks = timers_state.cpu_ticks_offset;
266 if (timers_state.cpu_ticks_enabled) {
267 ticks += cpu_get_host_ticks();
270 if (timers_state.cpu_ticks_prev > ticks) {
271     /* Non-increasing ticks may happen if the host uses software suspend. */
272 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
273 ticks = timers_state.cpu_ticks_prev;
276 timers_state.cpu_ticks_prev = ticks;
277 return ticks;
280 /* return the time elapsed in VM between vm_start and vm_stop. Unless
281 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
282 * counter.
284 int64_t cpu_get_ticks(void)
286 int64_t ticks;
288 if (use_icount) {
289 return cpu_get_icount();
292 qemu_spin_lock(&timers_state.vm_clock_lock);
293 ticks = cpu_get_ticks_locked();
294 qemu_spin_unlock(&timers_state.vm_clock_lock);
295 return ticks;
298 static int64_t cpu_get_clock_locked(void)
300 int64_t time;
302 time = timers_state.cpu_clock_offset;
303 if (timers_state.cpu_ticks_enabled) {
304 time += get_clock();
307 return time;
310 /* Return the monotonic time elapsed in VM, i.e.,
311 * the time between vm_start and vm_stop
313 int64_t cpu_get_clock(void)
315 int64_t ti;
316 unsigned start;
318 do {
319 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
320 ti = cpu_get_clock_locked();
321 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
323 return ti;
326 /* enable cpu_get_ticks()
327 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
329 void cpu_enable_ticks(void)
331 seqlock_write_lock(&timers_state.vm_clock_seqlock,
332 &timers_state.vm_clock_lock);
333 if (!timers_state.cpu_ticks_enabled) {
334 timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
335 timers_state.cpu_clock_offset -= get_clock();
336 timers_state.cpu_ticks_enabled = 1;
338 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
339 &timers_state.vm_clock_lock);
342 /* disable cpu_get_ticks() : the clock is stopped. You must not call
343 * cpu_get_ticks() after that.
344 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
346 void cpu_disable_ticks(void)
348 seqlock_write_lock(&timers_state.vm_clock_seqlock,
349 &timers_state.vm_clock_lock);
350 if (timers_state.cpu_ticks_enabled) {
351 timers_state.cpu_ticks_offset += cpu_get_host_ticks();
352 timers_state.cpu_clock_offset = cpu_get_clock_locked();
353 timers_state.cpu_ticks_enabled = 0;
355 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
356 &timers_state.vm_clock_lock);
359 /* Correlation between real and virtual time is always going to be
360 fairly approximate, so ignore small variation.
361 When the guest is idle real and virtual time will be aligned in
362 the IO wait loop. */
363 #define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
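/* Periodically adjust icount_time_shift and the icount bias so that virtual time tracks real time. */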
365 static void icount_adjust(void)
367 int64_t cur_time;
368 int64_t cur_icount;
369 int64_t delta;
371 /* Protected by TimersState mutex. */
372 static int64_t last_delta;
374 /* If the VM is not running, then do nothing. */
375 if (!runstate_is_running()) {
376 return;
379 seqlock_write_lock(&timers_state.vm_clock_seqlock,
380 &timers_state.vm_clock_lock);
381 cur_time = cpu_get_clock_locked();
382 cur_icount = cpu_get_icount_locked();
384 delta = cur_icount - cur_time;
385 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
386 if (delta > 0
387 && last_delta + ICOUNT_WOBBLE < delta * 2
388 && timers_state.icount_time_shift > 0) {
389 /* The guest is getting too far ahead. Slow time down. */
390 atomic_set(&timers_state.icount_time_shift,
391 timers_state.icount_time_shift - 1);
393 if (delta < 0
394 && last_delta - ICOUNT_WOBBLE > delta * 2
395 && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
396 /* The guest is getting too far behind. Speed time up. */
397 atomic_set(&timers_state.icount_time_shift,
398 timers_state.icount_time_shift + 1);
400 last_delta = delta;
401 atomic_set_i64(&timers_state.qemu_icount_bias,
402 cur_icount - (timers_state.qemu_icount
403 << timers_state.icount_time_shift));
404 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
405 &timers_state.vm_clock_lock);
408 static void icount_adjust_rt(void *opaque)
410 timer_mod(timers_state.icount_rt_timer,
411 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
412 icount_adjust();
415 static void icount_adjust_vm(void *opaque)
417 timer_mod(timers_state.icount_vm_timer,
418 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
419 NANOSECONDS_PER_SECOND / 10);
420 icount_adjust();
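/* Convert a nanosecond interval into an instruction count, rounding up. */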
423 static int64_t qemu_icount_round(int64_t count)
425 int shift = atomic_read(&timers_state.icount_time_shift);
426 return (count + (1 << shift) - 1) >> shift;
429 static void icount_warp_rt(void)
431 unsigned seq;
432 int64_t warp_start;
434 /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
435 * changes from -1 to another value, so the race here is okay.
437 do {
438 seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
439 warp_start = timers_state.vm_clock_warp_start;
440 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
442 if (warp_start == -1) {
443 return;
446 seqlock_write_lock(&timers_state.vm_clock_seqlock,
447 &timers_state.vm_clock_lock);
448 if (runstate_is_running()) {
449 int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
450 cpu_get_clock_locked());
451 int64_t warp_delta;
453 warp_delta = clock - timers_state.vm_clock_warp_start;
454 if (use_icount == 2) {
456 * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
457 * far ahead of real time.
459 int64_t cur_icount = cpu_get_icount_locked();
460 int64_t delta = clock - cur_icount;
461 warp_delta = MIN(warp_delta, delta);
463 atomic_set_i64(&timers_state.qemu_icount_bias,
464 timers_state.qemu_icount_bias + warp_delta);
466 timers_state.vm_clock_warp_start = -1;
467 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
468 &timers_state.vm_clock_lock);
470 if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
471 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
475 static void icount_timer_cb(void *opaque)
477 /* No need for a checkpoint because the timer already synchronizes
478 * with CHECKPOINT_CLOCK_VIRTUAL_RT.
480 icount_warp_rt();
483 void qtest_clock_warp(int64_t dest)
485 int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
486 AioContext *aio_context;
487 assert(qtest_enabled());
488 aio_context = qemu_get_aio_context();
489 while (clock < dest) {
490 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
491 QEMU_TIMER_ATTR_ALL);
492 int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
494 seqlock_write_lock(&timers_state.vm_clock_seqlock,
495 &timers_state.vm_clock_lock);
496 atomic_set_i64(&timers_state.qemu_icount_bias,
497 timers_state.qemu_icount_bias + warp);
498 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
499 &timers_state.vm_clock_lock);
501 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
502 timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
503 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
505 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
508 void qemu_start_warp_timer(void)
510 int64_t clock;
511 int64_t deadline;
513 if (!use_icount) {
514 return;
517 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
518 * do not fire, so computing the deadline does not make sense.
520 if (!runstate_is_running()) {
521 return;
524 if (replay_mode != REPLAY_MODE_PLAY) {
525 if (!all_cpu_threads_idle()) {
526 return;
529 if (qtest_enabled()) {
530 /* When testing, qtest commands advance icount. */
531 return;
534 replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
535 } else {
536 /* warp clock deterministically in record/replay mode */
537 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
538 /* vCPU is sleeping and warp can't be started.
539 It is probably a race condition: notification sent
540 to vCPU was processed in advance and vCPU went to sleep.
541            Therefore we have to wake it up so it can do something. */
542 if (replay_has_checkpoint()) {
543 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
545 return;
549 /* We want to use the earliest deadline from ALL vm_clocks */
550 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
551 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
552 ~QEMU_TIMER_ATTR_EXTERNAL);
553 if (deadline < 0) {
554 static bool notified;
555 if (!icount_sleep && !notified) {
556 warn_report("icount sleep disabled and no active timers");
557 notified = true;
559 return;
562 if (deadline > 0) {
564 * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
565 * sleep. Otherwise, the CPU might be waiting for a future timer
566 * interrupt to wake it up, but the interrupt never comes because
567 * the vCPU isn't running any insns and thus doesn't advance the
568 * QEMU_CLOCK_VIRTUAL.
570 if (!icount_sleep) {
572 * We never let VCPUs sleep in no sleep icount mode.
573 * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
574 * to the next QEMU_CLOCK_VIRTUAL event and notify it.
575 * It is useful when we want a deterministic execution time,
576 * isolated from host latencies.
578 seqlock_write_lock(&timers_state.vm_clock_seqlock,
579 &timers_state.vm_clock_lock);
580 atomic_set_i64(&timers_state.qemu_icount_bias,
581 timers_state.qemu_icount_bias + deadline);
582 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
583 &timers_state.vm_clock_lock);
584 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
585 } else {
587 * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
588          * "real" time (related to the time left until the next event) has
589 * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
590          * This avoids making the warps visible externally; for example,
591 * you will not be sending network packets continuously instead of
592 * every 100ms.
594 seqlock_write_lock(&timers_state.vm_clock_seqlock,
595 &timers_state.vm_clock_lock);
596 if (timers_state.vm_clock_warp_start == -1
597 || timers_state.vm_clock_warp_start > clock) {
598 timers_state.vm_clock_warp_start = clock;
600 seqlock_write_unlock(&timers_state.vm_clock_seqlock,
601 &timers_state.vm_clock_lock);
602 timer_mod_anticipate(timers_state.icount_warp_timer,
603 clock + deadline);
605 } else if (deadline == 0) {
606 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
610 static void qemu_account_warp_timer(void)
612 if (!use_icount || !icount_sleep) {
613 return;
616 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
617 * do not fire, so computing the deadline does not make sense.
619 if (!runstate_is_running()) {
620 return;
623 /* warp clock deterministically in record/replay mode */
624 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
625 return;
628 timer_del(timers_state.icount_warp_timer);
629 icount_warp_rt();
632 static bool icount_state_needed(void *opaque)
634 return use_icount;
637 static bool warp_timer_state_needed(void *opaque)
639 TimersState *s = opaque;
640 return s->icount_warp_timer != NULL;
643 static bool adjust_timers_state_needed(void *opaque)
645 TimersState *s = opaque;
646 return s->icount_rt_timer != NULL;
650  * Subsection for warp timer migration is optional, because it may not be created
652 static const VMStateDescription icount_vmstate_warp_timer = {
653 .name = "timer/icount/warp_timer",
654 .version_id = 1,
655 .minimum_version_id = 1,
656 .needed = warp_timer_state_needed,
657 .fields = (VMStateField[]) {
658 VMSTATE_INT64(vm_clock_warp_start, TimersState),
659 VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
660 VMSTATE_END_OF_LIST()
664 static const VMStateDescription icount_vmstate_adjust_timers = {
665 .name = "timer/icount/timers",
666 .version_id = 1,
667 .minimum_version_id = 1,
668 .needed = adjust_timers_state_needed,
669 .fields = (VMStateField[]) {
670 VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
671 VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
672 VMSTATE_END_OF_LIST()
677 * This is a subsection for icount migration.
679 static const VMStateDescription icount_vmstate_timers = {
680 .name = "timer/icount",
681 .version_id = 1,
682 .minimum_version_id = 1,
683 .needed = icount_state_needed,
684 .fields = (VMStateField[]) {
685 VMSTATE_INT64(qemu_icount_bias, TimersState),
686 VMSTATE_INT64(qemu_icount, TimersState),
687 VMSTATE_END_OF_LIST()
689 .subsections = (const VMStateDescription*[]) {
690 &icount_vmstate_warp_timer,
691 &icount_vmstate_adjust_timers,
692 NULL
696 static const VMStateDescription vmstate_timers = {
697 .name = "timer",
698 .version_id = 2,
699 .minimum_version_id = 1,
700 .fields = (VMStateField[]) {
701 VMSTATE_INT64(cpu_ticks_offset, TimersState),
702 VMSTATE_UNUSED(8),
703 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
704 VMSTATE_END_OF_LIST()
706 .subsections = (const VMStateDescription*[]) {
707 &icount_vmstate_timers,
708 NULL
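/* Work item queued on a vCPU so that it sleeps for part of each throttle timeslice. */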
712 static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
714 double pct;
715 double throttle_ratio;
716 int64_t sleeptime_ns, endtime_ns;
718 if (!cpu_throttle_get_percentage()) {
719 return;
722 pct = (double)cpu_throttle_get_percentage()/100;
723 throttle_ratio = pct / (1 - pct);
724 /* Add 1ns to fix double's rounding error (like 0.9999999...) */
725 sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
726 endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
727 while (sleeptime_ns > 0 && !cpu->stop) {
728 if (sleeptime_ns > SCALE_MS) {
729 qemu_cond_timedwait(cpu->halt_cond, &qemu_global_mutex,
730 sleeptime_ns / SCALE_MS);
731 } else {
732 qemu_mutex_unlock_iothread();
733 g_usleep(sleeptime_ns / SCALE_US);
734 qemu_mutex_lock_iothread();
736 sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
738 atomic_set(&cpu->throttle_thread_scheduled, 0);
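/* Timer callback: schedule throttle work on every vCPU and re-arm the throttle timer. */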
741 static void cpu_throttle_timer_tick(void *opaque)
743 CPUState *cpu;
744 double pct;
746 /* Stop the timer if needed */
747 if (!cpu_throttle_get_percentage()) {
748 return;
750 CPU_FOREACH(cpu) {
751 if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
752 async_run_on_cpu(cpu, cpu_throttle_thread,
753 RUN_ON_CPU_NULL);
757 pct = (double)cpu_throttle_get_percentage()/100;
758 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
759 CPU_THROTTLE_TIMESLICE_NS / (1-pct));
762 void cpu_throttle_set(int new_throttle_pct)
764 /* Ensure throttle percentage is within valid range */
765 new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
766 new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
768 atomic_set(&throttle_percentage, new_throttle_pct);
770 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
771 CPU_THROTTLE_TIMESLICE_NS);
774 void cpu_throttle_stop(void)
776 atomic_set(&throttle_percentage, 0);
779 bool cpu_throttle_active(void)
781 return (cpu_throttle_get_percentage() != 0);
784 int cpu_throttle_get_percentage(void)
786 return atomic_read(&throttle_percentage);
789 void cpu_ticks_init(void)
791 seqlock_init(&timers_state.vm_clock_seqlock);
792 qemu_spin_init(&timers_state.vm_clock_lock);
793 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
794 throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
795 cpu_throttle_timer_tick, NULL);
798 void configure_icount(QemuOpts *opts, Error **errp)
800 const char *option;
801 char *rem_str = NULL;
803 option = qemu_opt_get(opts, "shift");
804 if (!option) {
805 if (qemu_opt_get(opts, "align") != NULL) {
806 error_setg(errp, "Please specify shift option when using align");
808 return;
811 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
812 if (icount_sleep) {
813 timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
814 icount_timer_cb, NULL);
817 icount_align_option = qemu_opt_get_bool(opts, "align", false);
819 if (icount_align_option && !icount_sleep) {
820 error_setg(errp, "align=on and sleep=off are incompatible");
822 if (strcmp(option, "auto") != 0) {
823 errno = 0;
824 timers_state.icount_time_shift = strtol(option, &rem_str, 0);
825 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
826 error_setg(errp, "icount: Invalid shift value");
828 use_icount = 1;
829 return;
830 } else if (icount_align_option) {
831 error_setg(errp, "shift=auto and align=on are incompatible");
832 } else if (!icount_sleep) {
833 error_setg(errp, "shift=auto and sleep=off are incompatible");
836 use_icount = 2;
838 /* 125MIPS seems a reasonable initial guess at the guest speed.
839 It will be corrected fairly quickly anyway. */
840 timers_state.icount_time_shift = 3;
842 /* Have both realtime and virtual time triggers for speed adjustment.
843 The realtime trigger catches emulated time passing too slowly,
844 the virtual time trigger catches emulated time passing too fast.
845 Realtime triggers occur even when idle, so use them less frequently
846 than VM triggers. */
847 timers_state.vm_clock_warp_start = -1;
848 timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
849 icount_adjust_rt, NULL);
850 timer_mod(timers_state.icount_rt_timer,
851 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
852 timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
853 icount_adjust_vm, NULL);
854 timer_mod(timers_state.icount_vm_timer,
855 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
856 NANOSECONDS_PER_SECOND / 10);
859 /***********************************************************/
860 /* TCG vCPU kick timer
862 * The kick timer is responsible for moving single threaded vCPU
863  * emulation on to the next vCPU. If more than one vCPU is running, a
864  * timer event will force a cpu->exit so the next vCPU can get
865 * scheduled.
867 * The timer is removed if all vCPUs are idle and restarted again once
868 * idleness is complete.
871 static QEMUTimer *tcg_kick_vcpu_timer;
872 static CPUState *tcg_current_rr_cpu;
874 #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
876 static inline int64_t qemu_tcg_next_kick(void)
878 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
881 /* Kick the currently round-robin scheduled vCPU to next */
882 static void qemu_cpu_kick_rr_next_cpu(void)
884 CPUState *cpu;
885 do {
886 cpu = atomic_mb_read(&tcg_current_rr_cpu);
887 if (cpu) {
888 cpu_exit(cpu);
890 } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
893 /* Kick all RR vCPUs */
894 static void qemu_cpu_kick_rr_cpus(void)
896 CPUState *cpu;
898 CPU_FOREACH(cpu) {
899 cpu_exit(cpu);
903 static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
907 void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
909 if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
910 qemu_notify_event();
911 return;
914 if (qemu_in_vcpu_thread()) {
915 /* A CPU is currently running; kick it back out to the
916 * tcg_cpu_exec() loop so it will recalculate its
917 * icount deadline immediately.
919 qemu_cpu_kick(current_cpu);
920 } else if (first_cpu) {
921 /* qemu_cpu_kick is not enough to kick a halted CPU out of
922 * qemu_tcg_wait_io_event. async_run_on_cpu, instead,
923 * causes cpu_thread_is_idle to return false. This way,
924 * handle_icount_deadline can run.
925 * If we have no CPUs at all for some reason, we don't
926 * need to do anything.
928 async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
932 static void kick_tcg_thread(void *opaque)
934 timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
935 qemu_cpu_kick_rr_next_cpu();
938 static void start_tcg_kick_timer(void)
940 assert(!mttcg_enabled);
941 if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
942 tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
943 kick_tcg_thread, NULL);
945 if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
946 timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
950 static void stop_tcg_kick_timer(void)
952 assert(!mttcg_enabled);
953 if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
954 timer_del(tcg_kick_vcpu_timer);
958 /***********************************************************/
959 void hw_error(const char *fmt, ...)
961 va_list ap;
962 CPUState *cpu;
964 va_start(ap, fmt);
965 fprintf(stderr, "qemu: hardware error: ");
966 vfprintf(stderr, fmt, ap);
967 fprintf(stderr, "\n");
968 CPU_FOREACH(cpu) {
969 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
970 cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
972 va_end(ap);
973 abort();
976 void cpu_synchronize_all_states(void)
978 CPUState *cpu;
980 CPU_FOREACH(cpu) {
981 cpu_synchronize_state(cpu);
982 /* TODO: move to cpu_synchronize_state() */
983 if (hvf_enabled()) {
984 hvf_cpu_synchronize_state(cpu);
989 void cpu_synchronize_all_post_reset(void)
991 CPUState *cpu;
993 CPU_FOREACH(cpu) {
994 cpu_synchronize_post_reset(cpu);
995 /* TODO: move to cpu_synchronize_post_reset() */
996 if (hvf_enabled()) {
997 hvf_cpu_synchronize_post_reset(cpu);
1002 void cpu_synchronize_all_post_init(void)
1004 CPUState *cpu;
1006 CPU_FOREACH(cpu) {
1007 cpu_synchronize_post_init(cpu);
1008 /* TODO: move to cpu_synchronize_post_init() */
1009 if (hvf_enabled()) {
1010 hvf_cpu_synchronize_post_init(cpu);
1015 void cpu_synchronize_all_pre_loadvm(void)
1017 CPUState *cpu;
1019 CPU_FOREACH(cpu) {
1020 cpu_synchronize_pre_loadvm(cpu);
1024 static int do_vm_stop(RunState state, bool send_stop)
1026 int ret = 0;
1028 if (runstate_is_running()) {
1029 cpu_disable_ticks();
1030 pause_all_vcpus();
1031 runstate_set(state);
1032 vm_state_notify(0, state);
1033 if (send_stop) {
1034 qapi_event_send_stop();
1038 bdrv_drain_all();
1039 ret = bdrv_flush_all();
1041 return ret;
1044 /* Special vm_stop() variant for terminating the process. Historically clients
1045 * did not expect a QMP STOP event and so we need to retain compatibility.
1047 int vm_shutdown(void)
1049 return do_vm_stop(RUN_STATE_SHUTDOWN, false);
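/* True if the vCPU is currently allowed to execute guest code. */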
1052 static bool cpu_can_run(CPUState *cpu)
1054 if (cpu->stop) {
1055 return false;
1057 if (cpu_is_stopped(cpu)) {
1058 return false;
1060 return true;
1063 static void cpu_handle_guest_debug(CPUState *cpu)
1065 gdb_set_stop_cpu(cpu);
1066 qemu_system_debug_request();
1067 cpu->stopped = true;
1070 #ifdef CONFIG_LINUX
1071 static void sigbus_reraise(void)
1073 sigset_t set;
1074 struct sigaction action;
1076 memset(&action, 0, sizeof(action));
1077 action.sa_handler = SIG_DFL;
1078 if (!sigaction(SIGBUS, &action, NULL)) {
1079 raise(SIGBUS);
1080 sigemptyset(&set);
1081 sigaddset(&set, SIGBUS);
1082 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
1084 perror("Failed to re-raise SIGBUS!\n");
1085 abort();
1088 static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
1090 if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
1091 sigbus_reraise();
1094 if (current_cpu) {
1095 /* Called asynchronously in VCPU thread. */
1096 if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
1097 sigbus_reraise();
1099 } else {
1100 /* Called synchronously (via signalfd) in main thread. */
1101 if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
1102 sigbus_reraise();
1107 static void qemu_init_sigbus(void)
1109 struct sigaction action;
1111 memset(&action, 0, sizeof(action));
1112 action.sa_flags = SA_SIGINFO;
1113 action.sa_sigaction = sigbus_handler;
1114 sigaction(SIGBUS, &action, NULL);
1116 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
1118 #else /* !CONFIG_LINUX */
1119 static void qemu_init_sigbus(void)
1122 #endif /* !CONFIG_LINUX */
1124 static QemuThread io_thread;
1126 /* cpu creation */
1127 static QemuCond qemu_cpu_cond;
1128 /* system init */
1129 static QemuCond qemu_pause_cond;
1131 void qemu_init_cpu_loop(void)
1133 qemu_init_sigbus();
1134 qemu_cond_init(&qemu_cpu_cond);
1135 qemu_cond_init(&qemu_pause_cond);
1136 qemu_mutex_init(&qemu_global_mutex);
1138 qemu_thread_get_self(&io_thread);
1141 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
1143 do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
1146 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
1148 if (kvm_destroy_vcpu(cpu) < 0) {
1149 error_report("kvm_destroy_vcpu failed");
1150 exit(EXIT_FAILURE);
1154 static void qemu_tcg_destroy_vcpu(CPUState *cpu)
1158 static void qemu_cpu_stop(CPUState *cpu, bool exit)
1160 g_assert(qemu_cpu_is_self(cpu));
1161 cpu->stop = false;
1162 cpu->stopped = true;
1163 if (exit) {
1164 cpu_exit(cpu);
1166 qemu_cond_broadcast(&qemu_pause_cond);
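/* Common bookkeeping when a vCPU thread wakes up: clear the kick flag, honour stop requests and run queued work. */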
1169 static void qemu_wait_io_event_common(CPUState *cpu)
1171 atomic_mb_set(&cpu->thread_kicked, false);
1172 if (cpu->stop) {
1173 qemu_cpu_stop(cpu, false);
1175 process_queued_cpu_work(cpu);
1178 static void qemu_tcg_rr_wait_io_event(void)
1180 CPUState *cpu;
1182 while (all_cpu_threads_idle()) {
1183 stop_tcg_kick_timer();
1184 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
1187 start_tcg_kick_timer();
1189 CPU_FOREACH(cpu) {
1190 qemu_wait_io_event_common(cpu);
1194 static void qemu_wait_io_event(CPUState *cpu)
1196 bool slept = false;
1198 while (cpu_thread_is_idle(cpu)) {
1199 if (!slept) {
1200 slept = true;
1201 qemu_plugin_vcpu_idle_cb(cpu);
1203 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1205 if (slept) {
1206 qemu_plugin_vcpu_resume_cb(cpu);
1209 #ifdef _WIN32
1210 /* Eat dummy APC queued by qemu_cpu_kick_thread. */
1211 if (!tcg_enabled()) {
1212 SleepEx(0, TRUE);
1214 #endif
1215 qemu_wait_io_event_common(cpu);
1218 static void *qemu_kvm_cpu_thread_fn(void *arg)
1220 CPUState *cpu = arg;
1221 int r;
1223 rcu_register_thread();
1225 qemu_mutex_lock_iothread();
1226 qemu_thread_get_self(cpu->thread);
1227 cpu->thread_id = qemu_get_thread_id();
1228 cpu->can_do_io = 1;
1229 current_cpu = cpu;
1231 r = kvm_init_vcpu(cpu);
1232 if (r < 0) {
1233 error_report("kvm_init_vcpu failed: %s", strerror(-r));
1234 exit(1);
1237 kvm_init_cpu_signals(cpu);
1239 /* signal CPU creation */
1240 cpu->created = true;
1241 qemu_cond_signal(&qemu_cpu_cond);
1242 qemu_guest_random_seed_thread_part2(cpu->random_seed);
1244 do {
1245 if (cpu_can_run(cpu)) {
1246 r = kvm_cpu_exec(cpu);
1247 if (r == EXCP_DEBUG) {
1248 cpu_handle_guest_debug(cpu);
1251 qemu_wait_io_event(cpu);
1252 } while (!cpu->unplug || cpu_can_run(cpu));
1254 qemu_kvm_destroy_vcpu(cpu);
1255 cpu->created = false;
1256 qemu_cond_signal(&qemu_cpu_cond);
1257 qemu_mutex_unlock_iothread();
1258 rcu_unregister_thread();
1259 return NULL;
1262 static void *qemu_dummy_cpu_thread_fn(void *arg)
1264 #ifdef _WIN32
1265 error_report("qtest is not supported under Windows");
1266 exit(1);
1267 #else
1268 CPUState *cpu = arg;
1269 sigset_t waitset;
1270 int r;
1272 rcu_register_thread();
1274 qemu_mutex_lock_iothread();
1275 qemu_thread_get_self(cpu->thread);
1276 cpu->thread_id = qemu_get_thread_id();
1277 cpu->can_do_io = 1;
1278 current_cpu = cpu;
1280 sigemptyset(&waitset);
1281 sigaddset(&waitset, SIG_IPI);
1283 /* signal CPU creation */
1284 cpu->created = true;
1285 qemu_cond_signal(&qemu_cpu_cond);
1286 qemu_guest_random_seed_thread_part2(cpu->random_seed);
1288 do {
1289 qemu_mutex_unlock_iothread();
1290 do {
1291 int sig;
1292 r = sigwait(&waitset, &sig);
1293 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1294 if (r == -1) {
1295 perror("sigwait");
1296 exit(1);
1298 qemu_mutex_lock_iothread();
1299 qemu_wait_io_event(cpu);
1300 } while (!cpu->unplug);
1302 qemu_mutex_unlock_iothread();
1303 rcu_unregister_thread();
1304 return NULL;
1305 #endif
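/* Number of instructions the next TCG execution slice may run before the nearest timer deadline (or before the next replayed event). */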
1308 static int64_t tcg_get_icount_limit(void)
1310 int64_t deadline;
1312 if (replay_mode != REPLAY_MODE_PLAY) {
1314          * Include all the timers, because they may need attention.
1315          * Overly long CPU execution may create unnecessary delays in the UI.
1317 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
1318 QEMU_TIMER_ATTR_ALL);
1319 /* Check realtime timers, because they help with input processing */
1320 deadline = qemu_soonest_timeout(deadline,
1321 qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
1322 QEMU_TIMER_ATTR_ALL));
1324 /* Maintain prior (possibly buggy) behaviour where if no deadline
1325 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1326 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1327 * nanoseconds.
1329 if ((deadline < 0) || (deadline > INT32_MAX)) {
1330 deadline = INT32_MAX;
1333 return qemu_icount_round(deadline);
1334 } else {
1335 return replay_get_instructions();
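/* If the icount deadline has already expired, run QEMU_CLOCK_VIRTUAL timers directly from the vCPU thread. */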
1339 static void handle_icount_deadline(void)
1341 assert(qemu_in_vcpu_thread());
1342 if (use_icount) {
1343 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
1344 QEMU_TIMER_ATTR_ALL);
1346 if (deadline == 0) {
1347 /* Wake up other AioContexts. */
1348 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1349 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
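/* Set up the per-vCPU instruction budget (and take the replay lock) before entering the TCG execution loop. */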
1354 static void prepare_icount_for_run(CPUState *cpu)
1356 if (use_icount) {
1357 int insns_left;
1359 /* These should always be cleared by process_icount_data after
1360 * each vCPU execution. However u16.high can be raised
1361 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
1363 g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
1364 g_assert(cpu->icount_extra == 0);
1366 cpu->icount_budget = tcg_get_icount_limit();
1367 insns_left = MIN(0xffff, cpu->icount_budget);
1368 cpu_neg(cpu)->icount_decr.u16.low = insns_left;
1369 cpu->icount_extra = cpu->icount_budget - insns_left;
1371 replay_mutex_lock();
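/* Fold the instructions just executed into the global icount, reset the per-vCPU counters and drop the replay lock. */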
1375 static void process_icount_data(CPUState *cpu)
1377 if (use_icount) {
1378 /* Account for executed instructions */
1379 cpu_update_icount(cpu);
1381 /* Reset the counters */
1382 cpu_neg(cpu)->icount_decr.u16.low = 0;
1383 cpu->icount_extra = 0;
1384 cpu->icount_budget = 0;
1386 replay_account_executed_instructions();
1388 replay_mutex_unlock();
1393 static int tcg_cpu_exec(CPUState *cpu)
1395 int ret;
1396 #ifdef CONFIG_PROFILER
1397 int64_t ti;
1398 #endif
1400 assert(tcg_enabled());
1401 #ifdef CONFIG_PROFILER
1402 ti = profile_getclock();
1403 #endif
1404 cpu_exec_start(cpu);
1405 ret = cpu_exec(cpu);
1406 cpu_exec_end(cpu);
1407 #ifdef CONFIG_PROFILER
1408 atomic_set(&tcg_ctx->prof.cpu_exec_time,
1409 tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
1410 #endif
1411 return ret;
1414 /* Destroy any remaining vCPUs which have been unplugged and have
1415 * finished running
1417 static void deal_with_unplugged_cpus(void)
1419 CPUState *cpu;
1421 CPU_FOREACH(cpu) {
1422 if (cpu->unplug && !cpu_can_run(cpu)) {
1423 qemu_tcg_destroy_vcpu(cpu);
1424 cpu->created = false;
1425 qemu_cond_signal(&qemu_cpu_cond);
1426 break;
1431 /* Single-threaded TCG
1433 * In the single-threaded case each vCPU is simulated in turn. If
1434 * there is more than a single vCPU we create a simple timer to kick
1435 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1436 * This is done explicitly rather than relying on side-effects
1437 * elsewhere.
1440 static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
1442 CPUState *cpu = arg;
1444 assert(tcg_enabled());
1445 rcu_register_thread();
1446 tcg_register_thread();
1448 qemu_mutex_lock_iothread();
1449 qemu_thread_get_self(cpu->thread);
1451 cpu->thread_id = qemu_get_thread_id();
1452 cpu->created = true;
1453 cpu->can_do_io = 1;
1454 qemu_cond_signal(&qemu_cpu_cond);
1455 qemu_guest_random_seed_thread_part2(cpu->random_seed);
1457 /* wait for initial kick-off after machine start */
1458 while (first_cpu->stopped) {
1459 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
1461 /* process any pending work */
1462 CPU_FOREACH(cpu) {
1463 current_cpu = cpu;
1464 qemu_wait_io_event_common(cpu);
1468 start_tcg_kick_timer();
1470 cpu = first_cpu;
1472 /* process any pending work */
1473 cpu->exit_request = 1;
1475 while (1) {
1476 qemu_mutex_unlock_iothread();
1477 replay_mutex_lock();
1478 qemu_mutex_lock_iothread();
1479 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1480 qemu_account_warp_timer();
1482 /* Run the timers here. This is much more efficient than
1483 * waking up the I/O thread and waiting for completion.
1485 handle_icount_deadline();
1487 replay_mutex_unlock();
1489 if (!cpu) {
1490 cpu = first_cpu;
1493 while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
1495 atomic_mb_set(&tcg_current_rr_cpu, cpu);
1496 current_cpu = cpu;
1498 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1499 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1501 if (cpu_can_run(cpu)) {
1502 int r;
1504 qemu_mutex_unlock_iothread();
1505 prepare_icount_for_run(cpu);
1507 r = tcg_cpu_exec(cpu);
1509 process_icount_data(cpu);
1510 qemu_mutex_lock_iothread();
1512 if (r == EXCP_DEBUG) {
1513 cpu_handle_guest_debug(cpu);
1514 break;
1515 } else if (r == EXCP_ATOMIC) {
1516 qemu_mutex_unlock_iothread();
1517 cpu_exec_step_atomic(cpu);
1518 qemu_mutex_lock_iothread();
1519 break;
1521 } else if (cpu->stop) {
1522 if (cpu->unplug) {
1523 cpu = CPU_NEXT(cpu);
1525 break;
1528 cpu = CPU_NEXT(cpu);
1529 } /* while (cpu && !cpu->exit_request).. */
1531 /* Does not need atomic_mb_set because a spurious wakeup is okay. */
1532 atomic_set(&tcg_current_rr_cpu, NULL);
1534 if (cpu && cpu->exit_request) {
1535 atomic_mb_set(&cpu->exit_request, 0);
1538 if (use_icount && all_cpu_threads_idle()) {
1540          * When all CPUs are sleeping (e.g. in WFI), to avoid a deadlock
1541 * in the main_loop, wake it up in order to start the warp timer.
1543 qemu_notify_event();
1546 qemu_tcg_rr_wait_io_event();
1547 deal_with_unplugged_cpus();
1550 rcu_unregister_thread();
1551 return NULL;
1554 static void *qemu_hax_cpu_thread_fn(void *arg)
1556 CPUState *cpu = arg;
1557 int r;
1559 rcu_register_thread();
1560 qemu_mutex_lock_iothread();
1561 qemu_thread_get_self(cpu->thread);
1563 cpu->thread_id = qemu_get_thread_id();
1564 cpu->created = true;
1565 current_cpu = cpu;
1567 hax_init_vcpu(cpu);
1568 qemu_cond_signal(&qemu_cpu_cond);
1569 qemu_guest_random_seed_thread_part2(cpu->random_seed);
1571 do {
1572 if (cpu_can_run(cpu)) {
1573 r = hax_smp_cpu_exec(cpu);
1574 if (r == EXCP_DEBUG) {
1575 cpu_handle_guest_debug(cpu);
1579 qemu_wait_io_event(cpu);
1580 } while (!cpu->unplug || cpu_can_run(cpu));
1581 rcu_unregister_thread();
1582 return NULL;
1585 /* The HVF-specific vCPU thread function. This one should only run when the host
1586 * CPU supports the VMX "unrestricted guest" feature. */
1587 static void *qemu_hvf_cpu_thread_fn(void *arg)
1589 CPUState *cpu = arg;
1591 int r;
1593 assert(hvf_enabled());
1595 rcu_register_thread();
1597 qemu_mutex_lock_iothread();
1598 qemu_thread_get_self(cpu->thread);
1600 cpu->thread_id = qemu_get_thread_id();
1601 cpu->can_do_io = 1;
1602 current_cpu = cpu;
1604 hvf_init_vcpu(cpu);
1606 /* signal CPU creation */
1607 cpu->created = true;
1608 qemu_cond_signal(&qemu_cpu_cond);
1609 qemu_guest_random_seed_thread_part2(cpu->random_seed);
1611 do {
1612 if (cpu_can_run(cpu)) {
1613 r = hvf_vcpu_exec(cpu);
1614 if (r == EXCP_DEBUG) {
1615 cpu_handle_guest_debug(cpu);
1618 qemu_wait_io_event(cpu);
1619 } while (!cpu->unplug || cpu_can_run(cpu));
1621 hvf_vcpu_destroy(cpu);
1622 cpu->created = false;
1623 qemu_cond_signal(&qemu_cpu_cond);
1624 qemu_mutex_unlock_iothread();
1625 rcu_unregister_thread();
1626 return NULL;
1629 static void *qemu_whpx_cpu_thread_fn(void *arg)
1631 CPUState *cpu = arg;
1632 int r;
1634 rcu_register_thread();
1636 qemu_mutex_lock_iothread();
1637 qemu_thread_get_self(cpu->thread);
1638 cpu->thread_id = qemu_get_thread_id();
1639 current_cpu = cpu;
1641 r = whpx_init_vcpu(cpu);
1642 if (r < 0) {
1643 fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
1644 exit(1);
1647 /* signal CPU creation */
1648 cpu->created = true;
1649 qemu_cond_signal(&qemu_cpu_cond);
1650 qemu_guest_random_seed_thread_part2(cpu->random_seed);
1652 do {
1653 if (cpu_can_run(cpu)) {
1654 r = whpx_vcpu_exec(cpu);
1655 if (r == EXCP_DEBUG) {
1656 cpu_handle_guest_debug(cpu);
1659 while (cpu_thread_is_idle(cpu)) {
1660 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1662 qemu_wait_io_event_common(cpu);
1663 } while (!cpu->unplug || cpu_can_run(cpu));
1665 whpx_destroy_vcpu(cpu);
1666 cpu->created = false;
1667 qemu_cond_signal(&qemu_cpu_cond);
1668 qemu_mutex_unlock_iothread();
1669 rcu_unregister_thread();
1670 return NULL;
1673 #ifdef _WIN32
1674 static void CALLBACK dummy_apc_func(ULONG_PTR unused)
1677 #endif
1679 /* Multi-threaded TCG
1681 * In the multi-threaded case each vCPU has its own thread. The TLS
1682 * variable current_cpu can be used deep in the code to find the
1683 * current CPUState for a given thread.
1686 static void *qemu_tcg_cpu_thread_fn(void *arg)
1688 CPUState *cpu = arg;
1690 assert(tcg_enabled());
1691 g_assert(!use_icount);
1693 rcu_register_thread();
1694 tcg_register_thread();
1696 qemu_mutex_lock_iothread();
1697 qemu_thread_get_self(cpu->thread);
1699 cpu->thread_id = qemu_get_thread_id();
1700 cpu->created = true;
1701 cpu->can_do_io = 1;
1702 current_cpu = cpu;
1703 qemu_cond_signal(&qemu_cpu_cond);
1704 qemu_guest_random_seed_thread_part2(cpu->random_seed);
1706 /* process any pending work */
1707 cpu->exit_request = 1;
1709 do {
1710 if (cpu_can_run(cpu)) {
1711 int r;
1712 qemu_mutex_unlock_iothread();
1713 r = tcg_cpu_exec(cpu);
1714 qemu_mutex_lock_iothread();
1715 switch (r) {
1716 case EXCP_DEBUG:
1717 cpu_handle_guest_debug(cpu);
1718 break;
1719 case EXCP_HALTED:
1720 /* during start-up the vCPU is reset and the thread is
1721 * kicked several times. If we don't ensure we go back
1722 * to sleep in the halted state we won't cleanly
1723                  * start up when the vCPU is enabled.
1725 * cpu->halted should ensure we sleep in wait_io_event
1727 g_assert(cpu->halted);
1728 break;
1729 case EXCP_ATOMIC:
1730 qemu_mutex_unlock_iothread();
1731 cpu_exec_step_atomic(cpu);
1732 qemu_mutex_lock_iothread();
1733 default:
1734 /* Ignore everything else? */
1735 break;
1739 atomic_mb_set(&cpu->exit_request, 0);
1740 qemu_wait_io_event(cpu);
1741 } while (!cpu->unplug || cpu_can_run(cpu));
1743 qemu_tcg_destroy_vcpu(cpu);
1744 cpu->created = false;
1745 qemu_cond_signal(&qemu_cpu_cond);
1746 qemu_mutex_unlock_iothread();
1747 rcu_unregister_thread();
1748 return NULL;
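/* Kick a vCPU thread out of its execution loop: SIG_IPI on POSIX hosts, a WHPX kick or dummy APC on Windows. */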
1751 static void qemu_cpu_kick_thread(CPUState *cpu)
1753 #ifndef _WIN32
1754 int err;
1756 if (cpu->thread_kicked) {
1757 return;
1759 cpu->thread_kicked = true;
1760 err = pthread_kill(cpu->thread->thread, SIG_IPI);
1761 if (err && err != ESRCH) {
1762 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1763 exit(1);
1765 #else /* _WIN32 */
1766 if (!qemu_cpu_is_self(cpu)) {
1767 if (whpx_enabled()) {
1768 whpx_vcpu_kick(cpu);
1769 } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
1770 fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
1771 __func__, GetLastError());
1772 exit(1);
1775 #endif
1778 void qemu_cpu_kick(CPUState *cpu)
1780 qemu_cond_broadcast(cpu->halt_cond);
1781 if (tcg_enabled()) {
1782 if (qemu_tcg_mttcg_enabled()) {
1783 cpu_exit(cpu);
1784 } else {
1785 qemu_cpu_kick_rr_cpus();
1787 } else {
1788 if (hax_enabled()) {
1790 * FIXME: race condition with the exit_request check in
1791 * hax_vcpu_hax_exec
1793 cpu->exit_request = 1;
1795 qemu_cpu_kick_thread(cpu);
1799 void qemu_cpu_kick_self(void)
1801 assert(current_cpu);
1802 qemu_cpu_kick_thread(current_cpu);
1805 bool qemu_cpu_is_self(CPUState *cpu)
1807 return qemu_thread_is_self(cpu->thread);
1810 bool qemu_in_vcpu_thread(void)
1812 return current_cpu && qemu_cpu_is_self(current_cpu);
1815 static __thread bool iothread_locked = false;
1817 bool qemu_mutex_iothread_locked(void)
1819 return iothread_locked;
1823 * The BQL is taken from so many places that it is worth profiling the
1824 * callers directly, instead of funneling them all through a single function.
1826 void qemu_mutex_lock_iothread_impl(const char *file, int line)
1828 QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
1830 g_assert(!qemu_mutex_iothread_locked());
1831 bql_lock(&qemu_global_mutex, file, line);
1832 iothread_locked = true;
1835 void qemu_mutex_unlock_iothread(void)
1837 g_assert(qemu_mutex_iothread_locked());
1838 iothread_locked = false;
1839 qemu_mutex_unlock(&qemu_global_mutex);
1842 void qemu_cond_wait_iothread(QemuCond *cond)
1844 qemu_cond_wait(cond, &qemu_global_mutex);
1847 static bool all_vcpus_paused(void)
1849 CPUState *cpu;
1851 CPU_FOREACH(cpu) {
1852 if (!cpu->stopped) {
1853 return false;
1857 return true;
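/* Ask every vCPU to stop and wait until all of them have actually stopped. */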
1860 void pause_all_vcpus(void)
1862 CPUState *cpu;
1864 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1865 CPU_FOREACH(cpu) {
1866 if (qemu_cpu_is_self(cpu)) {
1867 qemu_cpu_stop(cpu, true);
1868 } else {
1869 cpu->stop = true;
1870 qemu_cpu_kick(cpu);
1874 /* We need to drop the replay_lock so any vCPU threads woken up
1875 * can finish their replay tasks
1877 replay_mutex_unlock();
1879 while (!all_vcpus_paused()) {
1880 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1881 CPU_FOREACH(cpu) {
1882 qemu_cpu_kick(cpu);
1886 qemu_mutex_unlock_iothread();
1887 replay_mutex_lock();
1888 qemu_mutex_lock_iothread();
1891 void cpu_resume(CPUState *cpu)
1893 cpu->stop = false;
1894 cpu->stopped = false;
1895 qemu_cpu_kick(cpu);
1898 void resume_all_vcpus(void)
1900 CPUState *cpu;
1902 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1903 CPU_FOREACH(cpu) {
1904 cpu_resume(cpu);
1908 void cpu_remove_sync(CPUState *cpu)
1910 cpu->stop = true;
1911 cpu->unplug = true;
1912 qemu_cpu_kick(cpu);
1913 qemu_mutex_unlock_iothread();
1914 qemu_thread_join(cpu->thread);
1915 qemu_mutex_lock_iothread();
1918 /* Size of the temporary buffers used to form vCPU thread names */
1919 #define VCPU_THREAD_NAME_SIZE 16
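/* Create the TCG vCPU thread(s): one thread per vCPU with MTTCG, or a single shared round-robin thread otherwise. */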
1921 static void qemu_tcg_init_vcpu(CPUState *cpu)
1923 char thread_name[VCPU_THREAD_NAME_SIZE];
1924 static QemuCond *single_tcg_halt_cond;
1925 static QemuThread *single_tcg_cpu_thread;
1926 static int tcg_region_inited;
1928 assert(tcg_enabled());
1930 * Initialize TCG regions--once. Now is a good time, because:
1931 * (1) TCG's init context, prologue and target globals have been set up.
1932 * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
1933 * -accel flag is processed, so the check doesn't work then).
1935 if (!tcg_region_inited) {
1936 tcg_region_inited = 1;
1937 tcg_region_init();
1940 if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
1941 cpu->thread = g_malloc0(sizeof(QemuThread));
1942 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1943 qemu_cond_init(cpu->halt_cond);
1945 if (qemu_tcg_mttcg_enabled()) {
1946 /* create a thread per vCPU with TCG (MTTCG) */
1947 parallel_cpus = true;
1948 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1949 cpu->cpu_index);
1951 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1952 cpu, QEMU_THREAD_JOINABLE);
1954 } else {
1955 /* share a single thread for all cpus with TCG */
1956 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
1957 qemu_thread_create(cpu->thread, thread_name,
1958 qemu_tcg_rr_cpu_thread_fn,
1959 cpu, QEMU_THREAD_JOINABLE);
1961 single_tcg_halt_cond = cpu->halt_cond;
1962 single_tcg_cpu_thread = cpu->thread;
1964 #ifdef _WIN32
1965 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1966 #endif
1967 } else {
1968 /* For non-MTTCG cases we share the thread */
1969 cpu->thread = single_tcg_cpu_thread;
1970 cpu->halt_cond = single_tcg_halt_cond;
1971 cpu->thread_id = first_cpu->thread_id;
1972 cpu->can_do_io = 1;
1973 cpu->created = true;
1977 static void qemu_hax_start_vcpu(CPUState *cpu)
1979 char thread_name[VCPU_THREAD_NAME_SIZE];
1981 cpu->thread = g_malloc0(sizeof(QemuThread));
1982 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1983 qemu_cond_init(cpu->halt_cond);
1985 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
1986 cpu->cpu_index);
1987 qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
1988 cpu, QEMU_THREAD_JOINABLE);
1989 #ifdef _WIN32
1990 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1991 #endif
1994 static void qemu_kvm_start_vcpu(CPUState *cpu)
1996 char thread_name[VCPU_THREAD_NAME_SIZE];
1998 cpu->thread = g_malloc0(sizeof(QemuThread));
1999 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2000 qemu_cond_init(cpu->halt_cond);
2001 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
2002 cpu->cpu_index);
2003 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
2004 cpu, QEMU_THREAD_JOINABLE);
2007 static void qemu_hvf_start_vcpu(CPUState *cpu)
2009 char thread_name[VCPU_THREAD_NAME_SIZE];
2011 /* HVF currently does not support TCG, and only runs in
2012 * unrestricted-guest mode. */
2013 assert(hvf_enabled());
2015 cpu->thread = g_malloc0(sizeof(QemuThread));
2016 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2017 qemu_cond_init(cpu->halt_cond);
2019 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
2020 cpu->cpu_index);
2021 qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
2022 cpu, QEMU_THREAD_JOINABLE);
2025 static void qemu_whpx_start_vcpu(CPUState *cpu)
2027 char thread_name[VCPU_THREAD_NAME_SIZE];
2029 cpu->thread = g_malloc0(sizeof(QemuThread));
2030 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2031 qemu_cond_init(cpu->halt_cond);
2032 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
2033 cpu->cpu_index);
2034 qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
2035 cpu, QEMU_THREAD_JOINABLE);
2036 #ifdef _WIN32
2037 cpu->hThread = qemu_thread_get_handle(cpu->thread);
2038 #endif
2041 static void qemu_dummy_start_vcpu(CPUState *cpu)
2043 char thread_name[VCPU_THREAD_NAME_SIZE];
2045 cpu->thread = g_malloc0(sizeof(QemuThread));
2046 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2047 qemu_cond_init(cpu->halt_cond);
2048 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
2049 cpu->cpu_index);
2050 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
2051 QEMU_THREAD_JOINABLE);
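/* Start the accelerator-specific thread for a newly created vCPU and wait until the thread reports it is up. */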
2054 void qemu_init_vcpu(CPUState *cpu)
2056 MachineState *ms = MACHINE(qdev_get_machine());
2058 cpu->nr_cores = ms->smp.cores;
2059 cpu->nr_threads = ms->smp.threads;
2060 cpu->stopped = true;
2061 cpu->random_seed = qemu_guest_random_seed_thread_part1();
2063 if (!cpu->as) {
2064 /* If the target cpu hasn't set up any address spaces itself,
2065 * give it the default one.
2067 cpu->num_ases = 1;
2068 cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
2071 if (kvm_enabled()) {
2072 qemu_kvm_start_vcpu(cpu);
2073 } else if (hax_enabled()) {
2074 qemu_hax_start_vcpu(cpu);
2075 } else if (hvf_enabled()) {
2076 qemu_hvf_start_vcpu(cpu);
2077 } else if (tcg_enabled()) {
2078 qemu_tcg_init_vcpu(cpu);
2079 } else if (whpx_enabled()) {
2080 qemu_whpx_start_vcpu(cpu);
2081 } else {
2082 qemu_dummy_start_vcpu(cpu);
2085 while (!cpu->created) {
2086 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
2090 void cpu_stop_current(void)
2092 if (current_cpu) {
2093 current_cpu->stop = true;
2094 cpu_exit(current_cpu);
2098 int vm_stop(RunState state)
2100 if (qemu_in_vcpu_thread()) {
2101 qemu_system_vmstop_request_prepare();
2102 qemu_system_vmstop_request(state);
2104 * FIXME: should not return to device code in case
2105 * vm_stop() has been requested.
2107 cpu_stop_current();
2108 return 0;
2111 return do_vm_stop(state, true);
2115 * Prepare for (re)starting the VM.
2116 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
2117 * running or in case of an error condition), 0 otherwise.
2119 int vm_prepare_start(void)
2121 RunState requested;
2123 qemu_vmstop_requested(&requested);
2124 if (runstate_is_running() && requested == RUN_STATE__MAX) {
2125 return -1;
2128 /* Ensure that a STOP/RESUME pair of events is emitted if a
2129 * vmstop request was pending. The BLOCK_IO_ERROR event, for
2130 * example, according to documentation is always followed by
2131 * the STOP event.
2133 if (runstate_is_running()) {
2134 qapi_event_send_stop();
2135 qapi_event_send_resume();
2136 return -1;
2139     /* We are sending this now, but the CPUs will be resumed shortly afterwards */
2140 qapi_event_send_resume();
2142 cpu_enable_ticks();
2143 runstate_set(RUN_STATE_RUNNING);
2144 vm_state_notify(1, RUN_STATE_RUNNING);
2145 return 0;
2148 void vm_start(void)
2150 if (!vm_prepare_start()) {
2151 resume_all_vcpus();
2155 /* Do a state transition even if the VM is already stopped;
2156    the current state is forgotten forever. */
2157 int vm_stop_force_state(RunState state)
2159 if (runstate_is_running()) {
2160 return vm_stop(state);
2161 } else {
2162 runstate_set(state);
2164 bdrv_drain_all();
2165 /* Make sure to return an error if the flush in a previous vm_stop()
2166 * failed. */
2167 return bdrv_flush_all();
2171 void list_cpus(const char *optarg)
2173 /* XXX: implement xxx_cpu_list for targets that still miss it */
2174 #if defined(cpu_list)
2175 cpu_list();
2176 #endif
2179 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2180 bool has_cpu, int64_t cpu_index, Error **errp)
2182 FILE *f;
2183 uint32_t l;
2184 CPUState *cpu;
2185 uint8_t buf[1024];
2186 int64_t orig_addr = addr, orig_size = size;
2188 if (!has_cpu) {
2189 cpu_index = 0;
2192 cpu = qemu_get_cpu(cpu_index);
2193 if (cpu == NULL) {
2194 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2195 "a CPU number");
2196 return;
2199 f = fopen(filename, "wb");
2200 if (!f) {
2201 error_setg_file_open(errp, errno, filename);
2202 return;
2205 while (size != 0) {
2206 l = sizeof(buf);
2207 if (l > size)
2208 l = size;
2209 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
2210 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2211 " specified", orig_addr, orig_size);
2212 goto exit;
2214 if (fwrite(buf, 1, l, f) != l) {
2215 error_setg(errp, QERR_IO_ERROR);
2216 goto exit;
2218 addr += l;
2219 size -= l;
2222 exit:
2223 fclose(f);
2226 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2227 Error **errp)
2229 FILE *f;
2230 uint32_t l;
2231 uint8_t buf[1024];
2233 f = fopen(filename, "wb");
2234 if (!f) {
2235 error_setg_file_open(errp, errno, filename);
2236 return;
2239 while (size != 0) {
2240 l = sizeof(buf);
2241 if (l > size)
2242 l = size;
2243 cpu_physical_memory_read(addr, buf, l);
2244 if (fwrite(buf, 1, l, f) != l) {
2245 error_setg(errp, QERR_IO_ERROR);
2246 goto exit;
2248 addr += l;
2249 size -= l;
2252 exit:
2253 fclose(f);
2256 void qmp_inject_nmi(Error **errp)
2258 nmi_monitor_handle(monitor_get_cpu_index(), errp);
2261 void dump_drift_info(void)
2263 if (!use_icount) {
2264 return;
2267 qemu_printf("Host - Guest clock %"PRIi64" ms\n",
2268 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2269 if (icount_align_option) {
2270 qemu_printf("Max guest delay %"PRIi64" ms\n",
2271 -max_delay / SCALE_MS);
2272 qemu_printf("Max guest advance %"PRIi64" ms\n",
2273 max_advance / SCALE_MS);
2274 } else {
2275 qemu_printf("Max guest delay NA\n");
2276 qemu_printf("Max guest advance NA\n");