/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "qom/cpu.h"
#include "sysemu/cpus.h"

static QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static bool cpu_index_auto_assigned;

static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    cpu_index_auto_assigned = true;
    /* Auto-assigned indexes are dense: the next free index is simply the
     * number of CPUs currently on the list.
     */
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void finish_safe_work(CPUState *cpu)
{
    /* Cycle through an empty cpu_exec_start/end pair so that a newly added
     * CPU waits for any exclusive operation already in flight.
     */
    cpu_exec_start(cpu);
    cpu_exec_end(cpu);
}

void cpu_list_add(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    finish_safe_work(cpu);
}

void cpu_list_remove(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        qemu_mutex_unlock(&qemu_cpu_list_lock);
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

struct qemu_work_item {
    struct qemu_work_item *next;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

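/*
 * Illustrative sketch, not part of this file: every work item carries a
 * run_on_cpu_func callback with the signature below.  The name do_nothing
 * is made up for the example.
 *
 *     static void do_nothing(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         // executes on cpu's thread; data is passed through unchanged
 *     }
 */
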
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}

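/*
 * Illustrative sketch, not part of this file: callers normally reach
 * do_run_on_cpu through a wrapper that supplies the mutex to sleep on.
 * In system emulation the wrapper in cpus.c passes the BQL, roughly:
 *
 *     void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 *     {
 *         do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
 *     }
 */
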
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}

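/*
 * Illustrative usage, a hedged sketch: the RUN_ON_CPU_* macros from
 * qom/cpu.h pack a pointer or integer into run_on_cpu_data, e.g.:
 *
 *     async_run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL);
 *
 * The item is heap-allocated with free = true, so process_queued_cpu_work
 * frees it after running it; the caller does not wait for completion.
 */
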
/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    atomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (atomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    atomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

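/*
 * Illustrative usage, a hedged sketch: an exclusive section brackets work
 * that must not run concurrently with any vCPU, e.g.:
 *
 *     start_exclusive();
 *     // ... touch state that any vCPU might be reading ...
 *     end_exclusive();
 *
 * While the section is open, pending_cpus >= 1, so other threads block in
 * exclusive_idle() until end_exclusive() resets pending_cpus to zero.
 */
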
/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    atomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    atomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running back to
             * true while holding it; no need to check pending_cpus again.
             */
            atomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            atomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    atomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            atomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}

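/*
 * Illustrative sketch, not part of this file: a vCPU thread brackets each
 * pass through the guest-code execution loop with this pair, roughly:
 *
 *     cpu_exec_start(cpu);
 *     trapnr = cpu_exec(cpu);    // may be interrupted by qemu_cpu_kick
 *     cpu_exec_end(cpu);
 *
 * so that start_exclusive counts the CPU in pending_cpus only while it is
 * actually executing guest code.
 */
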
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}

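/*
 * Illustrative usage, a hedged sketch: "safe" work runs while every other
 * CPU is stopped, which suits global invalidations.  For example, the
 * translation-buffer flush in translate-all.c is queued this way (names
 * and payload may differ between QEMU versions):
 *
 *     async_safe_run_on_cpu(cpu, do_tb_flush, RUN_ON_CPU_HOST_INT(count));
 */
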
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
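
/*
 * Illustrative sketch, not part of this file: the vCPU main loop drains the
 * queue between executions of guest code, roughly:
 *
 *     while (!should_exit) {    // hypothetical loop condition
 *         cpu_exec_start(cpu);
 *         cpu_exec(cpu);
 *         cpu_exec_end(cpu);
 *         process_queued_cpu_work(cpu);
 *     }
 *
 * Exclusive items drop the BQL around the callback (see the deadlock note
 * above), so the BQL may be released and re-acquired inside this function.
 */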