1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2006 Dan Everton
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
25 #include <SDL_thread.h>
27 #include <string.h> /* memset() */
29 #include "system-sdl.h"
30 #include "thread-sdl.h"
/* Define this as 1 to show informational messages that are not errors. */
#define THREAD_SDL_DEBUGF_ENABLED 0

#if THREAD_SDL_DEBUGF_ENABLED
#define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__)
/* Scratch buffer for THREAD_SDL_GET_NAME (debug builds only).
 * NOTE(review): '__name' is a reserved identifier in ISO C; kept for
 * interface compatibility with the macro below. */
static char __name[32];
#define THREAD_SDL_GET_NAME(thread) \
    ({ thread_get_name(__name, ARRAYLEN(__name), thread); __name; })
#else
#define THREAD_SDL_DEBUGF(...)
#define THREAD_SDL_GET_NAME(thread)
#endif

/* Print a message and abort the simulator - fatal-error escape hatch. */
#define THREAD_PANICF(str...) \
    ({ fprintf(stderr, str); exit(-1); })
52 /* Thread/core entries as in rockbox core */
53 static struct core_entry cores
[NUM_CORES
];
54 struct thread_entry threads
[MAXTHREADS
];
55 /* Jump buffers for graceful exit - kernel threads don't stay neatly
56 * in their start routines responding to messages so this is the only
57 * way to get them back in there so they may exit */
58 static jmp_buf thread_jmpbufs
[MAXTHREADS
];
59 /* this mutex locks out other Rockbox threads while one runs,
60 * that enables us to simulate a cooperative environment even if
61 * the host is preemptive */
63 static volatile bool threads_exit
= false;
65 extern long start_tick
;
67 void sim_do_exit(SDL_mutex
*m
);
69 void sim_thread_shutdown(void)
73 /* This *has* to be a push operation from a thread not in the pool
74 so that they may be dislodged from their blocking calls. */
76 /* Tell all threads jump back to their start routines, unlock and exit
77 gracefully - we'll check each one in turn for it's status. Threads
78 _could_ terminate via remove_thread or multiple threads could exit
79 on each unlock but that is safe. */
81 /* Do this before trying to acquire lock */
87 /* Signal all threads on delay or block */
88 for (i
= 0; i
< MAXTHREADS
; i
++)
90 struct thread_entry
*thread
= &threads
[i
];
91 if (thread
->context
.s
== NULL
)
93 SDL_SemPost(thread
->context
.s
);
96 /* Wait for all threads to finish and cleanup old ones. */
97 for (i
= 0; i
< MAXTHREADS
; i
++)
99 struct thread_entry
*thread
= &threads
[i
];
100 SDL_Thread
*t
= thread
->context
.t
;
105 /* Wait for it to finish */
106 SDL_WaitThread(t
, NULL
);
107 /* Relock for next thread signal */
109 /* Already waited and exiting thread would have waited .told,
110 * replacing it with t. */
111 thread
->context
.told
= NULL
;
115 /* Wait on any previous thread in this location-- could be one not quite
116 * finished exiting but has just unlocked the mutex. If it's NULL, the
117 * call returns immediately.
119 * See remove_thread below for more information. */
120 SDL_WaitThread(thread
->context
.told
, NULL
);
127 static void new_thread_id(unsigned int slot_num
,
128 struct thread_entry
*thread
)
130 unsigned int version
=
131 (thread
->id
+ (1u << THREAD_ID_VERSION_SHIFT
))
132 & THREAD_ID_VERSION_MASK
;
135 version
= 1u << THREAD_ID_VERSION_SHIFT
;
137 thread
->id
= version
| (slot_num
& THREAD_ID_SLOT_MASK
);
140 static struct thread_entry
* find_empty_thread_slot(void)
142 struct thread_entry
*thread
= NULL
;
145 for (n
= 0; n
< MAXTHREADS
; n
++)
147 int state
= threads
[n
].state
;
149 if (state
== STATE_KILLED
)
151 thread
= &threads
[n
];
160 /* Initialize SDL threading */
161 void init_threads(void)
163 struct thread_entry
*thread
;
166 memset(cores
, 0, sizeof(cores
));
167 memset(threads
, 0, sizeof(threads
));
169 m
= SDL_CreateMutex();
171 if (SDL_LockMutex(m
) == -1)
173 fprintf(stderr
, "Couldn't lock mutex\n");
177 /* Initialize all IDs */
178 for (n
= 0; n
< MAXTHREADS
; n
++)
179 threads
[n
].id
= THREAD_ID_INIT(n
);
181 /* Slot 0 is reserved for the main thread - initialize it here and
182 then create the SDL thread - it is possible to have a quick, early
183 shutdown try to access the structure. */
184 thread
= &threads
[0];
185 thread
->stack
= (uintptr_t *)" ";
186 thread
->stack_size
= 8;
187 thread
->name
= "main";
188 thread
->state
= STATE_RUNNING
;
189 thread
->context
.s
= SDL_CreateSemaphore(0);
190 thread
->context
.t
= NULL
; /* NULL for the implicit main thread */
191 cores
[CURRENT_CORE
].running
= thread
;
193 if (thread
->context
.s
== NULL
)
195 fprintf(stderr
, "Failed to create main semaphore\n");
199 /* Tell all threads jump back to their start routines, unlock and exit
200 gracefully - we'll check each one in turn for it's status. Threads
201 _could_ terminate via remove_thread or multiple threads could exit
202 on each unlock but that is safe. */
204 /* Setup jump for exit */
205 if (setjmp(thread_jmpbufs
[0]) == 0)
207 THREAD_SDL_DEBUGF("Main thread: %p\n", thread
);
217 void sim_thread_exception_wait(void)
227 /* A way to yield and leave the threading system for extended periods */
228 void sim_thread_lock(void *me
)
231 cores
[CURRENT_CORE
].running
= (struct thread_entry
*)me
;
237 void * sim_thread_unlock(void)
239 struct thread_entry
*current
= cores
[CURRENT_CORE
].running
;
244 struct thread_entry
* thread_id_entry(unsigned int thread_id
)
246 return (thread_id
== THREAD_ID_CURRENT
) ?
247 cores
[CURRENT_CORE
].running
:
248 &threads
[thread_id
& THREAD_ID_SLOT_MASK
];
251 static void add_to_list_l(struct thread_entry
**list
,
252 struct thread_entry
*thread
)
256 /* Insert into unoccupied list */
257 thread
->l
.next
= thread
;
258 thread
->l
.prev
= thread
;
264 thread
->l
.next
= *list
;
265 thread
->l
.prev
= (*list
)->l
.prev
;
266 thread
->l
.prev
->l
.next
= thread
;
267 (*list
)->l
.prev
= thread
;
271 static void remove_from_list_l(struct thread_entry
**list
,
272 struct thread_entry
*thread
)
274 if (thread
== thread
->l
.next
)
283 /* List becomes next item */
284 *list
= thread
->l
.next
;
287 /* Fix links to jump over the removed entry. */
288 thread
->l
.prev
->l
.next
= thread
->l
.next
;
289 thread
->l
.next
->l
.prev
= thread
->l
.prev
;
292 unsigned int thread_get_current(void)
294 return cores
[CURRENT_CORE
].running
->id
;
297 void switch_thread(void)
299 struct thread_entry
*current
= cores
[CURRENT_CORE
].running
;
303 switch (current
->state
)
308 /* Any other thread waiting already will get it first */
311 } /* STATE_RUNNING: */
318 SDL_SemWait(current
->context
.s
);
321 oldlevel
= disable_irq_save();
322 current
->state
= STATE_RUNNING
;
323 restore_irq(oldlevel
);
325 } /* STATE_BLOCKED: */
327 case STATE_BLOCKED_W_TMO
:
329 int result
, oldlevel
;
332 result
= SDL_SemWaitTimeout(current
->context
.s
, current
->tmo_tick
);
335 oldlevel
= disable_irq_save();
337 if (current
->state
== STATE_BLOCKED_W_TMO
)
340 remove_from_list_l(current
->bqp
, current
);
342 #ifdef HAVE_WAKEUP_EXT_CB
343 if (current
->wakeup_ext_cb
!= NULL
)
344 current
->wakeup_ext_cb(current
);
346 current
->state
= STATE_RUNNING
;
349 if (result
== SDL_MUTEX_TIMEDOUT
)
351 /* Other signals from an explicit wake could have been made before
352 * arriving here if we timed out waiting for the semaphore. Make
353 * sure the count is reset. */
354 while (SDL_SemValue(current
->context
.s
) > 0)
355 SDL_SemTryWait(current
->context
.s
);
358 restore_irq(oldlevel
);
360 } /* STATE_BLOCKED_W_TMO: */
365 SDL_SemWaitTimeout(current
->context
.s
, current
->tmo_tick
);
367 current
->state
= STATE_RUNNING
;
369 } /* STATE_SLEEPING: */
372 cores
[CURRENT_CORE
].running
= current
;
378 void sleep_thread(int ticks
)
380 struct thread_entry
*current
= cores
[CURRENT_CORE
].running
;
383 current
->state
= STATE_SLEEPING
;
385 rem
= (SDL_GetTicks() - start_tick
) % (1000/HZ
);
389 current
->tmo_tick
= (1000/HZ
) * ticks
+ ((1000/HZ
)-1) - rem
;
392 void block_thread(struct thread_entry
*current
)
394 current
->state
= STATE_BLOCKED
;
395 add_to_list_l(current
->bqp
, current
);
398 void block_thread_w_tmo(struct thread_entry
*current
, int ticks
)
400 current
->state
= STATE_BLOCKED_W_TMO
;
401 current
->tmo_tick
= (1000/HZ
)*ticks
;
402 add_to_list_l(current
->bqp
, current
);
405 unsigned int wakeup_thread(struct thread_entry
**list
)
407 struct thread_entry
*thread
= *list
;
411 switch (thread
->state
)
414 case STATE_BLOCKED_W_TMO
:
415 remove_from_list_l(list
, thread
);
416 thread
->state
= STATE_RUNNING
;
417 SDL_SemPost(thread
->context
.s
);
425 unsigned int thread_queue_wake(struct thread_entry
**list
)
427 unsigned int result
= THREAD_NONE
;
431 unsigned int rc
= wakeup_thread(list
);
433 if (rc
== THREAD_NONE
)
442 void thread_thaw(unsigned int thread_id
)
444 struct thread_entry
*thread
= thread_id_entry(thread_id
);
446 if (thread
->id
== thread_id
&& thread
->state
== STATE_FROZEN
)
448 thread
->state
= STATE_RUNNING
;
449 SDL_SemPost(thread
->context
.s
);
453 int runthread(void *data
)
455 struct thread_entry
*current
;
456 jmp_buf *current_jmpbuf
;
458 /* Cannot access thread variables before locking the mutex as the
459 data structures may not be filled-in yet. */
461 cores
[CURRENT_CORE
].running
= (struct thread_entry
*)data
;
462 current
= cores
[CURRENT_CORE
].running
;
463 current_jmpbuf
= &thread_jmpbufs
[current
- threads
];
465 /* Setup jump for exit */
466 if (setjmp(*current_jmpbuf
) == 0)
468 /* Run the thread routine */
469 if (current
->state
== STATE_FROZEN
)
472 SDL_SemWait(current
->context
.s
);
474 cores
[CURRENT_CORE
].running
= current
;
479 current
->context
.start();
480 THREAD_SDL_DEBUGF("Thread Done: %d (%s)\n",
481 current
- threads
, THREAD_SDL_GET_NAME(current
));
482 /* Thread routine returned - suicide */
489 /* Unlock and exit */
496 unsigned int create_thread(void (*function
)(void),
497 void* stack
, size_t stack_size
,
498 unsigned flags
, const char *name
)
500 struct thread_entry
*thread
;
504 THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name
? name
: "");
506 thread
= find_empty_thread_slot();
509 DEBUGF("Failed to find thread slot\n");
513 s
= SDL_CreateSemaphore(0);
516 DEBUGF("Failed to create semaphore\n");
520 t
= SDL_CreateThread(runthread
, thread
);
523 DEBUGF("Failed to create SDL thread\n");
524 SDL_DestroySemaphore(s
);
528 thread
->stack
= stack
;
529 thread
->stack_size
= stack_size
;
531 thread
->state
= (flags
& CREATE_THREAD_FROZEN
) ?
532 STATE_FROZEN
: STATE_RUNNING
;
533 thread
->context
.start
= function
;
534 thread
->context
.t
= t
;
535 thread
->context
.s
= s
;
537 THREAD_SDL_DEBUGF("New Thread: %d (%s)\n",
538 thread
- threads
, THREAD_SDL_GET_NAME(thread
));
543 #ifndef ALLOW_REMOVE_THREAD
544 static void remove_thread(unsigned int thread_id
)
546 void remove_thread(unsigned int thread_id
)
549 struct thread_entry
*current
= cores
[CURRENT_CORE
].running
;
550 struct thread_entry
*thread
= thread_id_entry(thread_id
);
555 if (thread_id
!= THREAD_ID_CURRENT
&& thread
->id
!= thread_id
)
558 int oldlevel
= disable_irq_save();
560 t
= thread
->context
.t
;
561 s
= thread
->context
.s
;
563 /* Wait the last thread here and keep this one or SDL will leak it since
564 * it doesn't free its own library allocations unless a wait is performed.
565 * Such behavior guards against the memory being invalid by the time
566 * SDL_WaitThread is reached and also against two different threads having
567 * the same pointer. It also makes SDL_WaitThread a non-concurrent function.
569 * However, see more below about SDL_KillThread.
571 SDL_WaitThread(thread
->context
.told
, NULL
);
573 thread
->context
.t
= NULL
;
574 thread
->context
.s
= NULL
;
575 thread
->context
.told
= t
;
577 if (thread
!= current
)
579 switch (thread
->state
)
582 case STATE_BLOCKED_W_TMO
:
583 /* Remove thread from object it's waiting on */
584 remove_from_list_l(thread
->bqp
, thread
);
586 #ifdef HAVE_WAKEUP_EXT_CB
587 if (thread
->wakeup_ext_cb
!= NULL
)
588 thread
->wakeup_ext_cb(thread
);
596 THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n",
597 thread
- threads
, THREAD_SDL_GET_NAME(thread
));
599 new_thread_id(thread
->id
, thread
);
600 thread
->state
= STATE_KILLED
;
601 thread_queue_wake(&thread
->queue
);
603 SDL_DestroySemaphore(s
);
605 if (thread
== current
)
607 /* Do a graceful exit - perform the longjmp back into the thread
608 function to return */
609 restore_irq(oldlevel
);
610 longjmp(thread_jmpbufs
[current
- threads
], 1);
613 /* SDL_KillThread frees the old pointer too because it uses SDL_WaitThread
614 * to wait for the host to remove it. */
615 thread
->context
.told
= NULL
;
617 restore_irq(oldlevel
);
620 void thread_exit(void)
622 remove_thread(THREAD_ID_CURRENT
);
625 void thread_wait(unsigned int thread_id
)
627 struct thread_entry
*current
= cores
[CURRENT_CORE
].running
;
628 struct thread_entry
*thread
= thread_id_entry(thread_id
);
630 if (thread_id
== THREAD_ID_CURRENT
||
631 (thread
->id
== thread_id
&& thread
->state
!= STATE_KILLED
))
633 current
->bqp
= &thread
->queue
;
634 block_thread(current
);
/* Stack usage is meaningless on the host - report a fixed percentage.
 * NOTE(review): the body was lost in extraction; the simulator
 * traditionally returns a constant here - confirm the exact value
 * against the original. */
int thread_stack_usage(const struct thread_entry *thread)
{
    (void)thread;
    return 50;
}
645 /* Return name if one or ID if none */
646 void thread_get_name(char *buffer
, int size
,
647 struct thread_entry
*thread
)
656 /* Display thread name if one or ID if none */
657 bool named
= thread
->name
&& *thread
->name
;
658 const char *fmt
= named
? "%s" : "%08lX";
659 intptr_t name
= named
?
660 (intptr_t)thread
->name
: (intptr_t)thread
;
661 snprintf(buffer
, size
, fmt
, name
);