monitor/monitor.c (qemu/ar7.git)

/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "sysemu/sysemu.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Coroutine to dispatch the requests received from I/O thread */
Coroutine *qmp_dispatcher_co;

/* Set to true when the dispatcher coroutine should terminate */
bool qmp_dispatcher_co_shutdown;

/*
 * qmp_dispatcher_co_busy is used for synchronisation between the
 * monitor thread and the main thread to ensure that the dispatcher
 * coroutine never gets scheduled a second time when it's already
 * scheduled (scheduling the same coroutine twice is forbidden).
 *
 * It is true if the coroutine is active and processing requests.
 * Additional requests may then be pushed onto mon->qmp_requests,
 * and @qmp_dispatcher_co_shutdown may be set without further ado.
 * @qmp_dispatcher_co_busy must not be woken up in this case.
 *
 * If false, you also have to set @qmp_dispatcher_co_busy to true and
 * wake up @qmp_dispatcher_co after pushing the new requests.
 *
 * The coroutine will automatically change this variable back to false
 * before it yields.  Nobody else may set the variable to false.
 *
 * Access must be atomic for thread safety.
 */
bool qmp_dispatcher_co_busy;

/*
 * Protects mon_list, monitor_qapi_event_state, coroutine_mon,
 * monitor_destroyed.
 */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;
static GHashTable *coroutine_mon; /* Maps Coroutine* to Monitor* */

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

Monitor *monitor_cur(void)
{
    Monitor *mon;

    qemu_mutex_lock(&monitor_lock);
    mon = g_hash_table_lookup(coroutine_mon, qemu_coroutine_self());
    qemu_mutex_unlock(&monitor_lock);

    return mon;
}

/*
 * Sets a new current monitor and returns the old one.
 *
 * If a non-NULL monitor is set for a coroutine, another call
 * resetting it to NULL is required before the coroutine terminates,
 * otherwise a stale entry would remain in the hash table.
 */
Monitor *monitor_set_cur(Coroutine *co, Monitor *mon)
{
    Monitor *old_monitor = monitor_cur();

    qemu_mutex_lock(&monitor_lock);
    if (mon) {
        g_hash_table_replace(coroutine_mon, co, mon);
    } else {
        g_hash_table_remove(coroutine_mon, co);
    }
    qemu_mutex_unlock(&monitor_lock);

    return old_monitor;
}

/*
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    Monitor *cur_mon = monitor_cur();

    return cur_mon && monitor_is_qmp(cur_mon);
}

/*
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}
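
/*
 * Is @mon an HMP monitor that cannot interact with the user, i.e.
 * one that does not use readline (such as the gdbserver monitor)?
 */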
static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);
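
/*
 * Watch callback: the chardev can accept output again.  Retry the
 * pending flush.  Returning FALSE removes the watch.
 */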
static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = qstring_get_str(mon->outbuf);
    len = qstring_get_length(mon->outbuf);

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            qobject_unref(mon->outbuf);
            mon->outbuf = qstring_new();
            return;
        }
        if (rc > 0) {
            /* partial write */
            QString *tmp = qstring_from_str(buf + rc);
            qobject_unref(mon->outbuf);
            mon->outbuf = tmp;
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            qstring_append_chr(mon->outbuf, '\r');
        }
        qstring_append_chr(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}
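
/*
 * Print to @mon like vprintf().  Only valid for HMP monitors:
 * returns -1 if @mon is NULL or a QMP monitor.
 */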
int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}
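
/*
 * Per-event throttling configuration.  Events not listed here keep a
 * rate of 0 and are emitted unthrottled by
 * monitor_qapi_event_queue_no_reenter().
 */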
static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
};

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object.  Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    qemu_mutex_lock(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send.  Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns.  Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }

    qemu_mutex_unlock(&monitor_lock);
}

void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock.  Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-entrant.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    qemu_mutex_lock(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }

    qemu_mutex_unlock(&monitor_lock);
}
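
/*
 * Hash function for the event throttling hash table: mix the event
 * type with the data member that identifies the affected object,
 * i.e. "id" for VSERPORT_CHANGE and "node-name" for
 * QUORUM_REPORT_BAD (see qapi_event_throttle_equal()).
 */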
static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    return TRUE;
}
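
/*
 * Suspend monitor input.  Each successful call must be balanced by a
 * monitor_resume().  Returns -ENOTTY for non-interactive HMP
 * monitors, which cannot be suspended, and 0 otherwise.
 */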
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    qatomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect.  It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}
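
/*
 * Undo one monitor_suspend().  When the suspend count drops to zero,
 * re-display the HMP prompt and schedule a bottom half in the
 * monitor's AioContext to accept input again.
 */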
void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}
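
/*
 * Chardev can-read handler: input is accepted only while the monitor
 * is not suspended.
 */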
int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !qatomic_mb_read(&mon->suspend_cnt);
}
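
/*
 * Add @mon to mon_list.  If monitor_cleanup() has already started,
 * the monitor is destroyed and freed instead of being inserted.
 */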
void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}
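
/*
 * Initialize the common Monitor state.  The shared monitor I/O
 * thread is created on first use when @use_io_thread is true.
 */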
void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = qstring_new();
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    qobject_unref(mon->outbuf);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread.
     * This order is needed because monitor_data_destroy() below
     * unregisters from the chardev, and chardev is not thread-safe
     * yet.
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    /*
     * The dispatcher needs to stop before destroying the I/O thread.
     *
     * We need to poll both qemu_aio_context and iohandler_ctx to make
     * sure that the dispatcher coroutine keeps making progress and
     * eventually terminates.  qemu_aio_context is automatically
     * polled by calling AIO_WAIT_WHILE on it, but we must poll
     * iohandler_ctx manually.
     */
    qmp_dispatcher_co_shutdown = true;
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }

    AIO_WAIT_WHILE(qemu_get_aio_context(),
                   (aio_poll(iohandler_get_aio_context(), false),
                    qatomic_mb_read(&qmp_dispatcher_co_busy)));

    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);
    coroutine_mon = g_hash_table_new(NULL, NULL);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context.  It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_co = qemu_coroutine_create(monitor_qmp_dispatcher_co, NULL);
    qatomic_mb_set(&qmp_dispatcher_co_busy, true);
    aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
}
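
/*
 * Create a monitor on the chardev named by @opts->chardev: a QMP
 * monitor for mode "control", an HMP monitor for mode "readline"
 * (only if @allow_hmp).  Returns 0 on success, -1 on error with
 * @errp set.
 */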
int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    Chardev *chr;
    Error *local_err = NULL;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, &local_err);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            warn_report("'pretty' is deprecated for HMP monitors, it has no "
                        "effect and will be removed in future versions");
        }
        monitor_init_hmp(chr, true, &local_err);
        break;
    default:
        g_assert_not_reached();
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}
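
/*
 * Parse a "-mon" QemuOpts into MonitorOptions with the opts visitor
 * and create the monitor accordingly (HMP allowed).
 */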
int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}
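
/*
 * Accepted "-mon" command-line options.  "chardev" is the implied
 * option, so "-mon <chardev-id>" is shorthand for
 * "-mon chardev=<chardev-id>".
 */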
QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};