/*
 * ucontext coroutine initialization code
 *
 * Copyright (C) 2006 Anthony Liguori <anthony@codemonkey.ws>
 * Copyright (C) 2011 Kevin Wolf <kwolf@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.0 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 /* XXX Is there a nicer way to disable glibc's stack check for longjmp? */
22 #undef _FORTIFY_SOURCE
23 #define _FORTIFY_SOURCE 0
25 #include "qemu/osdep.h"
27 #include "qemu/coroutine_int.h"
28 #include "qemu/coroutine-tls.h"
30 #ifdef CONFIG_VALGRIND_H
31 #include <valgrind/valgrind.h>
34 #ifdef QEMU_SANITIZE_ADDRESS
35 #ifdef CONFIG_ASAN_IFACE_FIBER
37 #include <sanitizer/asan_interface.h>
42 #include <sanitizer/tsan_interface.h>
49 #ifdef CONFIG_SAFESTACK
50 /* Need an unsafe stack for each coroutine */
52 size_t unsafe_stack_size
;
58 void *tsan_caller_fiber
;
61 #ifdef CONFIG_VALGRIND_H
62 unsigned int valgrind_stack_id
;
68 * Per-thread coroutine bookkeeping
70 QEMU_DEFINE_STATIC_CO_TLS(Coroutine
*, current
);
71 QEMU_DEFINE_STATIC_CO_TLS(CoroutineUContext
, leader
);
/*
 * va_args to makecontext() must be type 'int', so passing
 * the pointer we need may require several int args. This
 * union is a quick hack to let us do that
 */
84 * QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it.
85 * always_inline is required to avoid TSan runtime fatal errors.
87 static inline __attribute__((always_inline
))
88 void on_new_fiber(CoroutineUContext
*co
)
91 co
->tsan_co_fiber
= __tsan_create_fiber(0); /* flags: sync on switch */
92 co
->tsan_caller_fiber
= __tsan_get_current_fiber();
/*
 * Complete a stack switch from the sanitizers' point of view.
 *
 * For ASan, this reports the stack we just landed on; the first time we
 * return to the thread's native stack, its bounds are recorded in the
 * per-thread leader so later switches can report them.
 * For TSan, a non-NULL fake_stack_save is the fiber we are returning to.
 *
 * always_inline is required to avoid TSan runtime fatal errors.
 */
static inline __attribute__((always_inline))
void finish_switch_fiber(void *fake_stack_save)
{
#ifdef CONFIG_ASAN
    CoroutineUContext *leaderp = get_ptr_leader();
    const void *bottom_old;
    size_t size_old;

    __sanitizer_finish_switch_fiber(fake_stack_save, &bottom_old, &size_old);

    /* Lazily record the native thread stack bounds in the leader */
    if (!leaderp->stack) {
        leaderp->stack = (void *)bottom_old;
        leaderp->stack_size = size_old;
    }
#endif
#ifdef CONFIG_TSAN
    if (fake_stack_save) {
        __tsan_release(fake_stack_save);
        __tsan_switch_to_fiber(fake_stack_save, 0); /* 0=synchronize */
    }
#endif
}
120 /* always_inline is required to avoid TSan runtime fatal errors. */
121 static inline __attribute__((always_inline
))
122 void start_switch_fiber_asan(CoroutineAction action
, void **fake_stack_save
,
123 const void *bottom
, size_t size
)
126 __sanitizer_start_switch_fiber(
127 action
== COROUTINE_TERMINATE
? NULL
: fake_stack_save
,
132 /* always_inline is required to avoid TSan runtime fatal errors. */
133 static inline __attribute__((always_inline
))
134 void start_switch_fiber_tsan(void **fake_stack_save
,
135 CoroutineUContext
*co
,
139 void *new_fiber
= caller
?
140 co
->tsan_caller_fiber
:
142 void *curr_fiber
= __tsan_get_current_fiber();
143 __tsan_acquire(curr_fiber
);
145 *fake_stack_save
= curr_fiber
;
146 __tsan_switch_to_fiber(new_fiber
, 0); /* 0=synchronize */
150 static void coroutine_trampoline(int i0
, int i1
)
153 CoroutineUContext
*self
;
155 void *fake_stack_save
= NULL
;
157 finish_switch_fiber(NULL
);
164 /* Initialize longjmp environment and switch back the caller */
165 if (!sigsetjmp(self
->env
, 0)) {
166 CoroutineUContext
*leaderp
= get_ptr_leader();
168 start_switch_fiber_asan(COROUTINE_YIELD
, &fake_stack_save
,
169 leaderp
->stack
, leaderp
->stack_size
);
170 start_switch_fiber_tsan(&fake_stack_save
, self
, true); /* true=caller */
171 siglongjmp(*(sigjmp_buf
*)co
->entry_arg
, 1);
174 finish_switch_fiber(fake_stack_save
);
177 co
->entry(co
->entry_arg
);
178 qemu_coroutine_switch(co
, co
->caller
, COROUTINE_TERMINATE
);
182 Coroutine
*qemu_coroutine_new(void)
184 CoroutineUContext
*co
;
185 ucontext_t old_uc
, uc
;
187 union cc_arg arg
= {0};
188 void *fake_stack_save
= NULL
;
190 /* The ucontext functions preserve signal masks which incurs a
191 * system call overhead. sigsetjmp(buf, 0)/siglongjmp() does not
192 * preserve signal masks but only works on the current stack.
193 * Since we need a way to create and switch to a new stack, use
194 * the ucontext functions for that but sigsetjmp()/siglongjmp() for
198 if (getcontext(&uc
) == -1) {
202 co
= g_malloc0(sizeof(*co
));
203 co
->stack_size
= COROUTINE_STACK_SIZE
;
204 co
->stack
= qemu_alloc_stack(&co
->stack_size
);
205 #ifdef CONFIG_SAFESTACK
206 co
->unsafe_stack_size
= COROUTINE_STACK_SIZE
;
207 co
->unsafe_stack
= qemu_alloc_stack(&co
->unsafe_stack_size
);
209 co
->base
.entry_arg
= &old_env
; /* stash away our jmp_buf */
211 uc
.uc_link
= &old_uc
;
212 uc
.uc_stack
.ss_sp
= co
->stack
;
213 uc
.uc_stack
.ss_size
= co
->stack_size
;
214 uc
.uc_stack
.ss_flags
= 0;
216 #ifdef CONFIG_VALGRIND_H
217 co
->valgrind_stack_id
=
218 VALGRIND_STACK_REGISTER(co
->stack
, co
->stack
+ co
->stack_size
);
224 makecontext(&uc
, (void (*)(void))coroutine_trampoline
,
225 2, arg
.i
[0], arg
.i
[1]);
227 /* swapcontext() in, siglongjmp() back out */
228 if (!sigsetjmp(old_env
, 0)) {
229 start_switch_fiber_asan(COROUTINE_YIELD
, &fake_stack_save
, co
->stack
,
231 start_switch_fiber_tsan(&fake_stack_save
,
232 co
, false); /* false=not caller */
234 #ifdef CONFIG_SAFESTACK
236 * Before we swap the context, set the new unsafe stack
237 * The unsafe stack grows just like the normal stack, so start from
238 * the last usable location of the memory area.
239 * NOTE: we don't have to re-set the usp afterwards because we are
240 * coming back to this context through a siglongjmp.
241 * The compiler already wrapped the corresponding sigsetjmp call with
242 * code that saves the usp on the (safe) stack before the call, and
243 * restores it right after (which is where we return with siglongjmp).
245 void *usp
= co
->unsafe_stack
+ co
->unsafe_stack_size
;
246 __safestack_unsafe_stack_ptr
= usp
;
249 swapcontext(&old_uc
, &uc
);
252 finish_switch_fiber(fake_stack_save
);
#ifdef CONFIG_VALGRIND_H
/* Work around an unused variable in the valgrind.h macro... */
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#endif
/* Tell valgrind the coroutine stack registered at creation is going away. */
static inline void valgrind_stack_deregister(CoroutineUContext *co)
{
    VALGRIND_STACK_DEREGISTER(co->valgrind_stack_id);
}
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
#endif
272 void qemu_coroutine_delete(Coroutine
*co_
)
274 CoroutineUContext
*co
= DO_UPCAST(CoroutineUContext
, base
, co_
);
276 #ifdef CONFIG_VALGRIND_H
277 valgrind_stack_deregister(co
);
280 qemu_free_stack(co
->stack
, co
->stack_size
);
281 #ifdef CONFIG_SAFESTACK
282 qemu_free_stack(co
->unsafe_stack
, co
->unsafe_stack_size
);
287 /* This function is marked noinline to prevent GCC from inlining it
288 * into coroutine_trampoline(). If we allow it to do that then it
289 * hoists the code to get the address of the TLS variable "current"
290 * out of the while() loop. This is an invalid transformation because
291 * the sigsetjmp() call may be called when running thread A but
292 * return in thread B, and so we might be in a different thread
293 * context each time round the loop.
295 CoroutineAction
__attribute__((noinline
))
296 qemu_coroutine_switch(Coroutine
*from_
, Coroutine
*to_
,
297 CoroutineAction action
)
299 CoroutineUContext
*from
= DO_UPCAST(CoroutineUContext
, base
, from_
);
300 CoroutineUContext
*to
= DO_UPCAST(CoroutineUContext
, base
, to_
);
302 void *fake_stack_save
= NULL
;
306 ret
= sigsetjmp(from
->env
, 0);
308 start_switch_fiber_asan(action
, &fake_stack_save
, to
->stack
,
310 start_switch_fiber_tsan(&fake_stack_save
,
311 to
, false); /* false=not caller */
312 siglongjmp(to
->env
, action
);
315 finish_switch_fiber(fake_stack_save
);
320 Coroutine
*qemu_coroutine_self(void)
322 Coroutine
*self
= get_current();
323 CoroutineUContext
*leaderp
= get_ptr_leader();
326 self
= &leaderp
->base
;
330 if (!leaderp
->tsan_co_fiber
) {
331 leaderp
->tsan_co_fiber
= __tsan_get_current_fiber();
337 bool qemu_in_coroutine(void)
339 Coroutine
*self
= get_current();
341 return self
&& self
->caller
;