monitor/monitor.c
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Coroutine to dispatch the requests received from I/O thread */
Coroutine *qmp_dispatcher_co;

/* Set to true when the dispatcher coroutine should terminate */
bool qmp_dispatcher_co_shutdown;

/*
 * qmp_dispatcher_co_busy is used for synchronisation between the
 * monitor thread and the main thread to ensure that the dispatcher
 * coroutine never gets scheduled a second time when it's already
 * scheduled (scheduling the same coroutine twice is forbidden).
 *
 * It is true if the coroutine is active and processing requests.
 * Additional requests may then be pushed onto mon->qmp_requests,
 * and @qmp_dispatcher_co_shutdown may be set without further ado.
 * @qmp_dispatcher_co_busy must not be woken up in this case.
 *
 * If false, you also have to set @qmp_dispatcher_co_busy to true and
 * wake up @qmp_dispatcher_co after pushing the new requests.
 *
 * The coroutine will automatically change this variable back to false
 * before it yields. Nobody else may set the variable to false.
 *
 * Access must be atomic for thread safety.
 */
bool qmp_dispatcher_co_busy;

/*
 * Protects mon_list, monitor_qapi_event_state, coroutine_mon,
 * monitor_destroyed.
 */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;
static GHashTable *coroutine_mon; /* Maps Coroutine* to Monitor* */

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

Monitor *monitor_cur(void)
{
    Monitor *mon;

    qemu_mutex_lock(&monitor_lock);
    mon = g_hash_table_lookup(coroutine_mon, qemu_coroutine_self());
    qemu_mutex_unlock(&monitor_lock);

    return mon;
}

/*
 * Sets a new current monitor and returns the old one.
 *
 * If a non-NULL monitor is set for a coroutine, another call
 * resetting it to NULL is required before the coroutine terminates,
 * otherwise a stale entry would remain in the hash table.
 */
Monitor *monitor_set_cur(Coroutine *co, Monitor *mon)
{
    Monitor *old_monitor = monitor_cur();

    qemu_mutex_lock(&monitor_lock);
    if (mon) {
        g_hash_table_replace(coroutine_mon, co, mon);
    } else {
        g_hash_table_remove(coroutine_mon, co);
    }
    qemu_mutex_unlock(&monitor_lock);

    return old_monitor;
}

/*
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    Monitor *cur_mon = monitor_cur();

    return cur_mon && monitor_is_qmp(cur_mon);
}

/*
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = mon->outbuf->str;
    len = mon->outbuf->len;

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            g_string_truncate(mon->outbuf, 0);
            return;
        }
        if (rc > 0) {
            /* partial write */
            g_string_erase(mon->outbuf, 0, rc);
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            g_string_append_c(mon->outbuf, '\r');
        }
        g_string_append_c(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}

int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}

static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
};
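
/*
 * Concretely, with the 1 s rate above: the first such event from a
 * guest is emitted immediately, and further events of the same kind
 * within that second are coalesced so that only the most recent one
 * is sent when the throttle timer fires (see
 * monitor_qapi_event_queue_no_reenter() and
 * monitor_qapi_event_handler() below).
 */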

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object. Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    QEMU_LOCK_GUARD(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send. Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns. Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }
}

void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock. Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    QEMU_LOCK_GUARD(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }
}
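
/*
 * Throttling state is keyed on the event type, plus the "id" for
 * VSERPORT_CHANGE and the "node-name" for QUORUM_REPORT_BAD, so that
 * events referring to different devices are rate-limited independently
 * of one another.
 */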
static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    return TRUE;
}
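
/*
 * monitor_suspend() and monitor_resume() nest: each successful suspend
 * bumps suspend_cnt and must be balanced by a resume; monitor_can_read()
 * accepts input again only once the count drops back to zero.
 */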
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    qatomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect. It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !qatomic_mb_read(&mon->suspend_cnt);
}

void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = g_string_new(NULL);
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    g_string_free(mon->outbuf, true);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * The dispatcher needs to stop before destroying the monitor and
     * the I/O thread.
     *
     * We need to poll both qemu_aio_context and iohandler_ctx to make
     * sure that the dispatcher coroutine keeps making progress and
     * eventually terminates. qemu_aio_context is automatically
     * polled by calling AIO_WAIT_WHILE on it, but we must poll
     * iohandler_ctx manually.
     *
     * Letting the iothread continue while shutting down the dispatcher
     * means that new requests may still be coming in. This is okay,
     * we'll just leave them in the queue without sending a response
     * and monitor_data_destroy() will free them.
     */
    qmp_dispatcher_co_shutdown = true;
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }

    AIO_WAIT_WHILE(qemu_get_aio_context(),
                   (aio_poll(iohandler_get_aio_context(), false),
                    qatomic_mb_read(&qmp_dispatcher_co_busy)));

    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread since
     * we need to unregister from chardev below in
     * monitor_data_destroy(), and chardev is not thread-safe yet
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);
    coroutine_mon = g_hash_table_new(NULL, NULL);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context. It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_co = qemu_coroutine_create(monitor_qmp_dispatcher_co, NULL);
    qatomic_mb_set(&qmp_dispatcher_co_busy, true);
    aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
}

int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    Chardev *chr;
    Error *local_err = NULL;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, &local_err);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            error_setg(errp, "'pretty' is not compatible with HMP monitors");
            return -1;
        }
        monitor_init_hmp(chr, true, &local_err);
        break;
    default:
        g_assert_not_reached();
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}

int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}
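
/*
 * qemu_mon_opts below backs the -mon command-line option; for example
 * (chardev name purely illustrative):
 *   -chardev socket,id=mon0,path=/tmp/qmp.sock,server=on,wait=off
 *   -mon chardev=mon0,mode=control,pretty=on
 * creates a pretty-printing QMP monitor on a Unix socket.
 */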
QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};