/*
 * Copyright Red Hat Inc., 2013
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "block/aio.h"
#include "block/block.h"
#include "sysemu/iothread.h"
#include "qmp-commands.h"
#include "qemu/error-report.h"
#include "qemu/rcu.h"
#include "qemu/main-loop.h"

typedef ObjectClass IOThreadClass;

#define IOTHREAD_GET_CLASS(obj) \
   OBJECT_GET_CLASS(IOThreadClass, obj, TYPE_IOTHREAD)
#define IOTHREAD_CLASS(klass) \
   OBJECT_CLASS_CHECK(IOThreadClass, klass, TYPE_IOTHREAD)

/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
 * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
 * workloads.
 */
#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL

static __thread IOThread *my_iothread;

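/* Return the AioContext of the IOThread the caller runs in, or the global
 * AioContext when called from the main loop or another non-IOThread thread.
 */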
AioContext *qemu_get_current_aio_context(void)
{
    return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
}

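/* Body of the IOThread's event loop.  Publishes the thread ID for
 * iothread_complete(), then keeps polling the AioContext until
 * iothread_stop_bh() clears iothread->running.  If a GMainContext was
 * requested via iothread_get_g_main_context(), a GMainLoop is run on it;
 * the AioContext is still dispatched through the GSource attached to that
 * context.
 */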
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (iothread->running) {
        aio_poll(iothread->ctx, true);

        if (atomic_read(&iothread->worker_context)) {
            GMainLoop *loop;

            g_main_context_push_thread_default(iothread->worker_context);
            iothread->main_loop =
                g_main_loop_new(iothread->worker_context, TRUE);
            loop = iothread->main_loop;

            g_main_loop_run(iothread->main_loop);
            iothread->main_loop = NULL;
            g_main_loop_unref(loop);

            g_main_context_pop_thread_default(iothread->worker_context);
        }
    }

    rcu_unregister_thread();
    return NULL;
}

/* Runs in iothread_run() thread */
static void iothread_stop_bh(void *opaque)
{
    IOThread *iothread = opaque;

    iothread->running = false; /* stop iothread_run() */

    if (iothread->main_loop) {
        g_main_loop_quit(iothread->main_loop);
    }
}

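/* Ask the IOThread to exit its event loop and wait for the thread to
 * terminate.  Safe to call more than once; repeated calls are no-ops.
 */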
void iothread_stop(IOThread *iothread)
{
    if (!iothread->ctx || iothread->stopping) {
        return;
    }
    iothread->stopping = true;
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}

static int iothread_stop_iter(Object *object, void *opaque)
{
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }
    iothread_stop(iothread);
    return 0;
}

static void iothread_instance_init(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
}

static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(iothread);
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
    }
    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);
    if (!iothread->ctx) {
        return;
    }
    aio_context_unref(iothread->ctx);
}

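/* UserCreatable completion hook: create the AioContext, apply the polling
 * parameters, spawn the event loop thread, and wait until it has reported
 * its thread ID.
 */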
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);
    iothread->once = (GOnce) G_ONCE_INIT;

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}

typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in IOThread struct */
} PollParamInfo;

static PollParamInfo poll_max_ns_info = {
    "poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static PollParamInfo poll_grow_info = {
    "poll-grow", offsetof(IOThread, poll_grow),
};
static PollParamInfo poll_shrink_info = {
    "poll-shrink", offsetof(IOThread, poll_shrink),
};

static void iothread_get_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    PollParamInfo *info = opaque;
    int64_t *field = (void *)iothread + info->offset;

    visit_type_int64(v, name, field, errp);
}

static void iothread_set_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    PollParamInfo *info = opaque;
    int64_t *field = (void *)iothread + info->offset;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int64(v, name, &value, &local_err);
    if (local_err) {
        goto out;
    }

    if (value < 0) {
        error_setg(&local_err, "%s value must be in range [0, %"PRId64"]",
                   info->name, INT64_MAX);
        goto out;
    }

    *field = value;

    if (iothread->ctx) {
        aio_context_set_poll_params(iothread->ctx,
                                    iothread->poll_max_ns,
                                    iothread->poll_grow,
                                    iothread->poll_shrink,
                                    &local_err);
    }

out:
    error_propagate(errp, local_err);
}

static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
    ucc->complete = iothread_complete;

    object_class_property_add(klass, "poll-max-ns", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_max_ns_info, &error_abort);
    object_class_property_add(klass, "poll-grow", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_grow_info, &error_abort);
    object_class_property_add(klass, "poll-shrink", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_shrink_info, &error_abort);
}

static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_OBJECT,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_init = iothread_instance_init,
    .instance_finalize = iothread_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        {TYPE_USER_CREATABLE},
        {}
    },
};

static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}

type_init(iothread_register_types)

char *iothread_get_id(IOThread *iothread)
{
    return object_get_canonical_path_component(OBJECT(iothread));
}

AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}

static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***prev = opaque;
    IOThreadInfoList *elem;
    IOThreadInfo *info;
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;
    info->poll_max_ns = iothread->poll_max_ns;
    info->poll_grow = iothread->poll_grow;
    info->poll_shrink = iothread->poll_shrink;

    elem = g_new0(IOThreadInfoList, 1);
    elem->value = info;
    elem->next = NULL;

    **prev = elem;
    *prev = &elem->next;
    return 0;
}

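/* QMP query-iothreads handler: build an IOThreadInfo entry for every IOThread
 * object found under the objects root.
 */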
IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = object_get_objects_root();

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}

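/* Move all block devices back to the main loop's AioContext, then stop every
 * IOThread object under the objects root.
 */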
void iothread_stop_all(void)
{
    Object *container = object_get_objects_root();
    BlockDriverState *bs;
    BdrvNextIterator it;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        if (ctx == qemu_get_aio_context()) {
            continue;
        }
        aio_context_acquire(ctx);
        bdrv_set_aio_context(bs, qemu_get_aio_context());
        aio_context_release(ctx);
    }

    object_child_foreach(container, iothread_stop_iter, NULL);
}

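/* One-time initializer (run via g_once) that creates the IOThread's
 * GMainContext and attaches the AioContext's GSource to it, then kicks the
 * event loop with aio_notify() so iothread_run() picks up the new context.
 */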
static gpointer iothread_g_main_context_init(gpointer opaque)
{
    AioContext *ctx;
    IOThread *iothread = opaque;
    GSource *source;

    iothread->worker_context = g_main_context_new();

    ctx = iothread_get_aio_context(iothread);
    source = aio_get_g_source(ctx);
    g_source_attach(source, iothread->worker_context);
    g_source_unref(source);

    aio_notify(iothread->ctx);
    return NULL;
}

GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
    g_once(&iothread->once, iothread_g_main_context_init, iothread);

    return iothread->worker_context;
}

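/* Create an IOThread parented under the internal object root; such objects
 * are not user-created and are not found by iothread_by_id().
 */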
IOThread *iothread_create(const char *id, Error **errp)
{
    Object *obj;

    obj = object_new_with_props(TYPE_IOTHREAD,
                                object_get_internal_root(),
                                id, errp, NULL);

    return IOTHREAD(obj);
}

void iothread_destroy(IOThread *iothread)
{
    object_unparent(OBJECT(iothread));
}

/* Lookup IOThread by its id. Only finds user-created objects, not internal
 * iothread_create() objects. */
IOThread *iothread_by_id(const char *id)
{
    return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
}