/* monitor/monitor.c */
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "sysemu/sysemu.h"
#include "trace.h"
/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;
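/*
 * How the two structures above work together (summary of the code
 * below): monitor_qapi_event_conf[] gives each event type a minimum
 * interval in ns (0 means unthrottled).  After a throttled event is
 * sent, a MonitorQAPIEventState stays in the monitor_qapi_event_state
 * hash table while its timer is pending; further events of the same
 * kind are stored in ->qdict, each replacing the previous one, so only
 * the most recent pending event is emitted when the timer fires.
 */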
/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Bottom half to dispatch the requests received from I/O thread */
QEMUBH *qmp_dispatcher_bh;

/* Protects mon_list, monitor_qapi_event_state, monitor_destroyed. */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

__thread Monitor *cur_mon;
/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}
static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);
static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}
/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = qstring_get_str(mon->outbuf);
    len = qstring_get_length(mon->outbuf);

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            qobject_unref(mon->outbuf);
            mon->outbuf = qstring_new();
            return;
        }
        if (rc > 0) {
            /* partial write */
            QString *tmp = qstring_from_str(buf + rc);
            qobject_unref(mon->outbuf);
            mon->outbuf = tmp;
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}
void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}
/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            qstring_append_chr(mon->outbuf, '\r');
        }
        qstring_append_chr(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}
int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}
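/*
 * Typical call from an HMP command handler (illustrative only; no such
 * handler is defined in this file):
 *
 *     monitor_printf(mon, "migration status: %s\n", status);
 *
 * On a QMP monitor, monitor_vprintf() and monitor_printf() return -1
 * without printing anything; QMP output goes through
 * qmp_send_response() instead (see monitor_qapi_event_emit() below).
 */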
/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}
static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
};
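/*
 * SCALE_MS is the number of nanoseconds per millisecond, so
 * 1000 * SCALE_MS is one second.  Event types not listed here have a
 * rate of 0 and are emitted unthrottled (see
 * monitor_qapi_event_queue_no_reenter() below).
 */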
/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}
/*
 * Broadcast an event to all monitors.
 * @qdict is the event object. Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);
/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    qemu_mutex_lock(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send.  Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns.  Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }

    qemu_mutex_unlock(&monitor_lock);
}
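/*
 * Illustrative timeline for the throttling above, assuming a 1-second
 * rate: an event at t=0 is emitted immediately and arms the timer for
 * t=1s.  Events of the same kind at t=0.3s and t=0.7s are only stored,
 * the latter replacing the former.  When the timer fires at t=1s, the
 * t=0.7s event is emitted and the timer is re-armed for t=2s; if no
 * further event arrives by then, the state entry is freed.
 */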
void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock.  Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}
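/*
 * Reentrancy in qapi_event_emit(): if emitting an event triggers
 * another event on the same thread (e.g. via a callback reached from
 * monitor_qapi_event_emit()), the inner call only appends to the
 * thread-local queue and returns; the outer call's drain loop then
 * processes it.  This avoids taking monitor_lock recursively.
 */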
/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    qemu_mutex_lock(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }

    qemu_mutex_unlock(&monitor_lock);
}
static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    return TRUE;
}
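/*
 * qapi_event_throttle_hash() and qapi_event_throttle_equal() form the
 * hash/equality pair of the monitor_qapi_event_state hash table (see
 * monitor_qapi_event_init()).  Keying VSERPORT_CHANGE by "id" and
 * QUORUM_REPORT_BAD by "node-name" makes events for different serial
 * ports or quorum children throttle independently of each other.
 */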
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    atomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect.  It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}
void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (atomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}
int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !atomic_mb_read(&mon->suspend_cnt);
}
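/*
 * monitor_suspend() and monitor_resume() adjust suspend_cnt, and
 * monitor_can_read() reports whether input may be delivered; it is
 * registered as the chardev frontend's "can read" callback by the
 * HMP/QMP setup code outside this file, so a suspended monitor simply
 * stops accepting input until it is resumed.
 */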
void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}
static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = qstring_new();
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}
void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    qobject_unref(mon->outbuf);
    qemu_mutex_destroy(&mon->mon_lock);
}
void monitor_cleanup(void)
{
    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, and only then destroy the I/O
     * thread, because monitor_data_destroy() below unregisters from
     * the chardev, and chardev is not thread-safe yet.
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    /* QEMUBHs need to be deleted before destroying the I/O thread */
    qemu_bh_delete(qmp_dispatcher_bh);
    qmp_dispatcher_bh = NULL;
    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}
static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context.  It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
                                   monitor_qmp_bh_dispatcher,
                                   NULL);
}
int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    Chardev *chr;
    Error *local_err = NULL;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, &local_err);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            warn_report("'pretty' is deprecated for HMP monitors, it has no "
                        "effect and will be removed in future versions");
        }
        monitor_init_hmp(chr, true, &local_err);
        break;
    default:
        g_assert_not_reached();
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}
int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}
QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
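/*
 * Example command line using these options (illustrative):
 *
 *     -chardev socket,id=mon0,path=/tmp/qmp.sock,server,nowait \
 *     -mon chardev=mon0,mode=control,pretty=on
 *
 * "mode" selects readline (HMP) or control (QMP), "chardev" names the
 * character device to attach to, and "pretty" enables pretty-printed
 * QMP output (deprecated and ignored for HMP, see monitor_init()).
 */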