Add target/hosted/unix/thread-pth.c, which is a working thread implementation
[kugel-rb.git] / firmware / target / hosted / unix / thread-pth.c
blob58601a1e062e6da41965bf2e46d486a76c11d4cb
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2006 Thomas Martitz
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
22 #include <stdbool.h>
23 #include <pth.h>
24 #include <stdlib.h>
25 #include <sys/time.h>
26 #include <memory.h>
27 #include <stdio.h>
28 #include "system.h"
29 #include "kernel.h"
30 #include "thread.h"
31 #include "thread-pth.h"
32 #include "debug.h"
33 #include <SDL.h> /* SDL_GetTicks() */
/* Define this as 1 to show informational messages that are not errors. */
#define HAVE_SIM_THREAD_DEBUG 0

#if HAVE_SIM_THREAD_DEBUG
/* debugging implies the extra consistency checks as well */
#undef THREAD_EXTRA_CHECKS
#define THREAD_EXTRA_CHECKS /* always when debugging */

#define SIM_THREAD_DEBUGF(...) DEBUGF(__VA_ARGS__)
/* scratch buffer for SIM_THREAD_GET_NAME; safe here because pth threads
 * are cooperative, so only one thread formats a name at a time.
 * NOTE(review): "__name" is a reserved identifier (double underscore) */
static char __name[32];
#define SIM_THREAD_GET_NAME(thread) \
    ({ thread_get_name(__name, ARRAYLEN(__name), thread); __name; })
#else
#define SIM_THREAD_DEBUGF(...)
#define SIM_THREAD_GET_NAME(thread)
#endif

/* path separator used below to strip the directory part of __FILE__ */
#ifdef WIN32
#define NEEDLE '\\'
#else
#define NEEDLE '/'
#endif

/* print "file:line:function(): <message>" to stderr, then abort the
 * whole simulator process */
#define THREAD_PANICF(str...) \
    do { \
        fprintf(stderr, "%s:%d:%s(): ", strrchr(__FILE__,NEEDLE)+1, __LINE__, __func__); \
        fprintf(stderr, str); \
        exit(-1); \
    } while (0)
/* Thread entries as in rockbox core */
struct thread_entry *current_thread; /* cache of the currently running thread */
struct thread_entry threads[MAXTHREADS];

/* tick baseline at simulator start; defined elsewhere in the hosted kernel */
extern long start_tick;
#ifdef HAVE_PRIORITY_SCHEDULING
/* map Rockbox prio to pth prios, LUT gives more freedom than
 * calculative conversion; index is the corresponding rockbox thread level.
 * Rockbox: 0 is the highest level; pth: PTH_PRIO_MAX (+5) is highest. */
#if (PTH_PRIO_MAX != 5 || PTH_PRIO_MIN != -5 || PTH_PRIO_STD != 0)
#error Priority map out of sync
#endif
static int prio_map[] =
{
    5, 5, 5, 5, 5, /* HIGHEST_PRIORITY, PRIORITY_REALTIME_* */
    4, 4, 4,       /* PRIORITY_PLAYBACK_MAX */
    3, 3, 3,
    2, 2, 2,
    1, 1,          /* PRIORITY_BUFFERING */
    0, 0,          /* PRIORITY_USER_INTERFACE, _RECORDING, _PLAYBACK */
    -1, -1,        /* PRIORITY_SYSTEM */
    -2, -2, -2,    /* PRIORITY_BACKGROUND */
    -3, -3, -3,
    -4, -4, -4,
    -5, -5, -5,    /* LOWEST_PRIORITY */
};
#endif
96 static int check_wakeup_cb(void* data)
98 struct thread_entry *t = (struct thread_entry*)data;
99 return (t->state == STATE_RUNNING);
/* Shut down the simulated kernel: force every live thread runnable and
 * cancel it so each exits gracefully.
 * wait for all threads to exit gracefully - pth_exit() is called
 * during pth_cancel()
 * pth_exit() goes back in the call stack to return in thread's entry
 * routine and then runthread() */
void sim_thread_shutdown(void)
{
    int i;

    /* main thread is removed last */
    for (i = 1; i < MAXTHREADS; i++)
    {
        struct thread_entry *thread = &threads[i];
        /* NOTE(review): always true -- &threads[i] can never be NULL;
         * the real liveness test is the context.t check below */
        if (thread != NULL)
        {
            pth_t t = thread->context.t;
            if (t)
            {
                /* force thread runnable */
                thread->state = STATE_RUNNING;
                /* Wait for it to finish */
                SIM_THREAD_DEBUGF("Waiting for (%s) to finish\n",
                                  SIM_THREAD_GET_NAME(thread));
                pth_cancel(t);
            }
        }
    }

    /* NOTE(review): main's context.t is NULL (set in init_threads);
     * pth_cancel(NULL) behaviour is unverified -- confirm */
    pth_cancel(threads[0].context.t);
}
131 static void new_thread_id(unsigned int slot_num,
132 struct thread_entry *thread)
134 unsigned int version =
135 (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
136 & THREAD_ID_VERSION_MASK;
138 if (version == 0)
139 version = 1u << THREAD_ID_VERSION_SHIFT;
141 thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
144 static struct thread_entry * find_empty_thread_slot(void)
146 struct thread_entry *thread = NULL;
147 int n;
149 for (n = 0; n < MAXTHREADS; n++)
151 int state = threads[n].state;
153 if (state == STATE_KILLED)
155 thread = &threads[n];
156 break;
160 return thread;
#if HAVE_SIM_THREAD_DEBUG
/* Signal handler for debugging: dump pth's scheduler state to stderr;
 * on fatal signals additionally tear pth down so the process dies clean.
 * NOTE(review): signal()/SIG* constants need <signal.h>, which is not
 * included directly -- presumably pulled in via system.h; confirm. */
static void dump_threads(int sig)
{
    /* after a SIGSEGV pth's own state may be corrupt; skip the dump */
    if (sig != SIGSEGV)
        pth_ctrl(PTH_CTRL_DUMPSTATE, stderr);

    switch (sig)
    {
    case SIGABRT:
    case SIGSEGV:
        pth_kill();
        /* fallthrough */
    default:
        break;
    }
}
#endif
181 void thread_wait(unsigned int id)
183 pth_event_t ev;
184 struct thread_entry *thread = thread_id_entry(id);
185 struct thread_entry *current = current_thread;
186 current->bqp = &thread->queue;
187 ev = pth_event(PTH_EVENT_TID | PTH_UNTIL_TID_DEAD, thread->context.t);
188 pth_wait(ev);
191 /* Initialize threading */
192 void init_threads(void)
194 int i;
195 struct thread_entry* thread;
197 memset(threads, 0, sizeof(threads));
198 /* Initialize all IDs */
199 for (i = 0; i < MAXTHREADS; i++)
200 threads[i].id = THREAD_ID_INIT(i);
202 if (pth_init() != true)
204 THREAD_PANICF("Failed to initialize pth threads\n");
205 return;
207 #if defined(HAVE_PRIORITY_SCHEDULING) && defined(THREAD_EXTRA_CHECKS)
208 /* sanity check */
209 if (ARRAYLEN(prio_map) != NUM_PRIORITIES)
210 THREAD_PANICF("Priority map out of sync\n");
211 #endif
213 #if HAVE_SIM_THREAD_DEBUG
214 signal(SIGABRT, dump_threads);
215 signal(SIGINT, dump_threads);
216 #endif
218 thread = &threads[0];
219 thread->stack = (uintptr_t *)" ";
220 thread->stack_size = 8;
221 thread->name = "main";
222 thread->state = STATE_RUNNING;
223 thread->context.t = NULL; /* NULL for the implicit main thread */
224 current_thread = thread;
226 SIM_THREAD_DEBUGF("Main thread: %p\n", thread_id_entry(i));
227 return;
230 void sim_thread_exception_wait(void)
232 while (1)
233 sleep(HZ/10);
/* No-op: pth schedules cooperatively, so nothing can preempt us and no
 * lock is required. */
void sim_thread_lock(void *me)
{
    (void)me;
}
242 void * sim_thread_unlock(void)
244 return current_thread;
/* Map a thread id to its table entry.  THREAD_ID_CURRENT resolves to the
 * cached current thread; any other id indexes the table via its slot
 * bits (version bits are ignored here, not validated). */
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
    struct thread_entry *thread = (thread_id == THREAD_ID_CURRENT) ?
        current_thread : &threads[thread_id & THREAD_ID_SLOT_MASK];

#ifdef THREAD_EXTRA_CHECKS
    /* cross-check the cached entry against pth's own notion of "self".
     * NOTE(review): the implicit main thread has context.t == NULL while
     * pth_self() never returns NULL -- presumably main never takes this
     * path with checks enabled; confirm */
    if (thread_id == THREAD_ID_CURRENT && thread->context.t != pth_self())
        THREAD_PANICF("Current thread corrupted\n");
#endif

    return thread;
}
258 static void add_to_list_l(struct thread_entry **list,
259 struct thread_entry *thread)
261 if (*list == NULL)
263 /* Insert into unoccupied list */
264 thread->l.next = thread;
265 thread->l.prev = thread;
266 *list = thread;
268 else
270 /* Insert last */
271 thread->l.next = *list;
272 thread->l.prev = (*list)->l.prev;
273 thread->l.prev->l.next = thread;
274 (*list)->l.prev = thread;
278 static void remove_from_list_l(struct thread_entry **list,
279 struct thread_entry *thread)
281 if (thread == thread->l.next)
283 /* The only item */
284 *list = NULL;
285 return;
288 if (thread == *list)
290 /* List becomes next item */
291 *list = thread->l.next;
294 /* Fix links to jump over the removed entry. */
295 thread->l.prev->l.next = thread->l.next;
296 thread->l.next->l.prev = thread->l.prev;
299 unsigned int thread_get_current(void)
301 return current_thread->id;
/* Yield the processor according to the current thread's scheduler state:
 *   STATE_RUNNING       -> plain cooperative yield
 *   STATE_BLOCKED       -> wait until another thread marks us RUNNING
 *   STATE_BLOCKED_W_TMO -> as BLOCKED, but also wake on timeout
 *   STATE_SLEEPING      -> nap for the requested time
 * tmo_tick is interpreted as MICROSECONDS here (set by sleep_thread()
 * and block_thread_w_tmo()), not as kernel ticks. */
void switch_thread(void)
{
    struct thread_entry *current = current_thread;

    enable_irq();

    switch (current->state)
    {
    case STATE_RUNNING:
    {
        /* let other threads run,
         * pass NULL to let pth decide which thread to run next based */
        pth_yield(NULL);
        break;
    } /* STATE_RUNNING: */

    case STATE_BLOCKED:
    {
        int oldlevel;

        /* poll check_wakeup_cb() (interval 1000us) until we are RUNNING */
        pth_event_t ev = pth_event(PTH_EVENT_FUNC, check_wakeup_cb,
                                   current, pth_time(0, 1000));
        /* wait for something being posted on the event */
        pth_wait(ev);
        pth_event_free(ev, PTH_FREE_THIS);

        oldlevel = disable_irq_save();
        current->state = STATE_RUNNING;
        restore_irq(oldlevel);
        break;
    } /* STATE_BLOCKED: */

    case STATE_BLOCKED_W_TMO:
    {
        int oldlevel;
        long usecs = current->tmo_tick;  /* microseconds, see header note */
        long secs = usecs / 1000000;
        /* create the message event as for BLOCKED */
        pth_event_t msg_ev = pth_event(PTH_EVENT_FUNC, check_wakeup_cb,
                                       current, pth_time(0, 1000));
        /* add an additional timeout event */
        pth_event_t time_ev = pth_event(PTH_EVENT_TIME, pth_timeout(secs,
                                        usecs % 1000000));

        /* wait for either event to fire first */
        pth_event_t ring = pth_event_concat(msg_ev, time_ev, NULL);
        pth_wait(ring);
        pth_event_free(ring, PTH_FREE_ALL);

        oldlevel = disable_irq_save();

        if (current->state == STATE_BLOCKED_W_TMO)
        {
            /* state unchanged: nobody woke us, so the timeout fired --
             * take ourselves off the wait list */
            remove_from_list_l(current->bqp, current);

#ifdef HAVE_WAKEUP_EXT_CB
            if (current->wakeup_ext_cb != NULL)
                current->wakeup_ext_cb(current);
#endif
            current->state = STATE_RUNNING;
        }

        restore_irq(oldlevel);
        break;
    } /* STATE_BLOCKED_W_TMO: */

    case STATE_SLEEPING:
    {
        pth_nap(pth_time(0, (unsigned)current->tmo_tick));
        current->state = STATE_RUNNING;
        break;
    } /* STATE_SLEEPING: */
    }

    /* refresh the cache: other threads ran meanwhile and overwrote it */
    current_thread = current;
}
/* Prepare the current thread to sleep for 'ticks' kernel ticks; the
 * actual nap is performed by switch_thread() (STATE_SLEEPING case).
 * tmo_tick ends up holding the sleep duration in microseconds. */
void sleep_thread(int ticks)
{
    struct thread_entry *current = current_thread;
    int rem;

    current->state = STATE_SLEEPING;

    /* add a remainder for more accurate sleeps: milliseconds of the
     * current tick that have already elapsed */
    rem = (SDL_GetTicks() - start_tick) % (1000/HZ);
    if (rem < 0)
        rem = 0;

    /* tick to ms to us, then add remainder ms */
    current->tmo_tick = (ticks*(1000/HZ) + ((1000/HZ)-1) - rem)*1000;
}
394 void block_thread(struct thread_entry *current)
396 current->state = STATE_BLOCKED;
397 add_to_list_l(current->bqp, current);
400 void block_thread_w_tmo(struct thread_entry *current, int ticks)
402 current->state = STATE_BLOCKED_W_TMO;
403 current->tmo_tick = ticks*(1000/HZ)*1000;
404 add_to_list_l(current->bqp, current);
/* Wake the first thread on a wait list, if any.
 * Returns THREAD_OK (possibly OR'd with THREAD_SWITCH when priorities
 * suggest yielding) or THREAD_NONE when the list is empty or its head
 * is not in a blocked state. */
unsigned int wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread = *list;

    if (thread != NULL)
    {
        switch (thread->state)
        {
        case STATE_BLOCKED:
        case STATE_BLOCKED_W_TMO:
        {
            unsigned retval = THREAD_OK;
#ifdef HAVE_PRIORITY_SCHEDULING
            struct thread_entry *bl = thread->blocker ?
                thread->blocker->thread : NULL;
            /* NOTE(review): this compares a Rockbox-scale priority with a
             * pth-scale value from prio_map[] -- the scales differ in
             * direction and range; verify the intended comparison */
            if (thread->priority <= prio_map[PRIORITY_REALTIME] ||
                (bl != NULL && bl->priority > thread->priority))
                retval |= THREAD_SWITCH;
#endif
            remove_from_list_l(list, thread);
            thread->state = STATE_RUNNING;
            return retval;
        }
        }
    }

    return THREAD_NONE;
}
437 unsigned int thread_queue_wake(struct thread_entry **list)
439 unsigned int result = THREAD_NONE;
441 for (;;)
443 unsigned int rc = wakeup_thread(list);
445 if (rc == THREAD_NONE)
446 break;
448 result |= rc;
451 return result;
454 void thread_thaw(unsigned int thread_id)
456 struct thread_entry *thread = thread_id_entry(thread_id);
458 if (thread->state == STATE_FROZEN)
460 thread->state = STATE_RUNNING;
461 /* wakeup */
462 pth_resume(thread->context.t);
467 * removes the thread entry struct for the scheduler */
468 static void thread_cleanup(void *data)
470 struct thread_entry *thread = (struct thread_entry*)data;
472 thread->context.t = NULL;
473 /* the slot can now be reassigned */
474 thread->state = STATE_KILLED;
477 #ifdef HAVE_PRIORITY_SCHEDULING
479 * stubs are sufficient for the next two, as pth handles priorities itself
481 * kernel.c pulls these in. fortunately only thread.c (which this file replaces)
482 * actually calls them so returning NULL should be ok */
483 struct thread_entry *wakeup_priority_protocol_transfer(struct thread_entry*t)
485 (void)t;
486 return NULL;
489 struct thread_entry *wakeup_priority_protocol_release(struct thread_entry*t)
491 (void)t;
492 return NULL;
/* Change a thread's priority (Rockbox scale).  The Rockbox level is kept
 * in the table entry for outside observers while the mapped pth value is
 * applied to the underlying pth thread.
 * Returns the previous base priority, -1 if prio is out of range, or -2
 * if the thread is invalid. */
int thread_set_priority(unsigned id, int prio)
{
    struct thread_entry *thread;
    pth_attr_t attr;
    int old_level;
    int old_base_prio = -1;

    if (prio < HIGHEST_PRIORITY || prio > LOWEST_PRIORITY)
        return -1;

    thread = thread_id_entry(id);
    if (thread == NULL)
        return -2;

    old_level = disable_irq_save();

    /* NOTE(review): the implicit main thread has context.t == NULL;
     * pth_attr_of(NULL) behaviour is unverified -- confirm main never
     * has its priority changed through here */
    attr = pth_attr_of(thread->context.t);
    pth_attr_set(attr, PTH_ATTR_PRIO, prio_map[prio]);
    old_base_prio = thread->base_priority;
    /* keep the rockbox prio scheme for the outside */
    thread->priority = thread->base_priority = prio;

    restore_irq(old_level);
    return old_base_prio;
}
/* Look up a thread's base priority (Rockbox scale) by id;
 * copied as is from firmware/thread.c.
 * Returns -1 when the id is stale or the thread has been killed. */
int thread_get_priority(unsigned thread_id)
{
    struct thread_entry *thread = thread_id_entry(thread_id);
    int base_priority = thread->base_priority;

    /* Simply check without locking slot. It may or may not be valid by the
     * time the function returns anyway. If all tests pass, it is the
     * correct value for when it was valid. */
    if (thread_id != THREAD_ID_CURRENT &&
        (thread->id != thread_id || thread->state == STATE_KILLED))
        base_priority = -1;

    return base_priority;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
/* I/O priority is pure bookkeeping in the simulator: the value is only
 * stored in the entry and read back, never used for scheduling here. */
int thread_get_io_priority(unsigned int thread_id)
{
    return thread_id_entry(thread_id)->io_priority;
}

void thread_set_io_priority(unsigned int thread_id,int io_priority)
{
    thread_id_entry(thread_id)->io_priority = io_priority;
}
#endif
/* pth entry trampoline:
 * Wrap entering the thread's entry point for easier exiting */
static void* runthread(void *thread)
{
    struct thread_entry *current;

    /* 'thread' is our own table entry; cache it before running */
    current = current_thread = thread;
    current->context.start();

    /* NOTE(review): "%ld" with a pointer-difference argument -- strictly
     * "%td" (ptrdiff_t) is the correct specifier; verify on 32-bit hosts */
    SIM_THREAD_DEBUGF("Thread Done: %ld (%s)\n",
                      current - threads, SIM_THREAD_GET_NAME(current));
    /* Thread routine returned - terminate */
    return NULL;
}
569 unsigned int create_thread(void (*function)(void),
570 void* stack, size_t stack_size,
571 unsigned flags, const char *name IF_PRIO(,int prio))
573 /* TODO: implement priority */
574 struct thread_entry *thread;
575 pth_t t;
576 pth_attr_t attr;
577 IF_PRIO(
578 int pth_prio = prio_map[prio];
581 * pth wants the stack on the heap, so ignore the most probably static stack
582 * buffer passed to us
583 * also, we seem to need much more stack on a pc */
584 stack_size = MAX(stack_size, 16u<<10);
585 stack = malloc(stack_size);
587 if (stack == NULL)
589 THREAD_PANICF("Out of memory\n", __func__);
590 return 0;
593 SIM_THREAD_DEBUGF("Creating thread: (%s)\n", name ? name : "");
595 thread = find_empty_thread_slot();
596 if (thread == NULL)
598 DEBUGF("Failed to find thread slot\n");
599 free(stack);
600 return 0;
603 attr = pth_attr_new();
604 pth_attr_set(attr, PTH_ATTR_NAME, name);
605 #ifdef HAVE_PRIORITY_SCHEDULING
606 pth_attr_set(attr, PTH_ATTR_PRIO, pth_prio);
607 #endif
608 /* explicitely set joinable we do need it for sane exiting */
609 pth_attr_set(attr, PTH_ATTR_JOINABLE, true);
610 pth_attr_set(attr, PTH_ATTR_STACK_ADDR, stack);
611 pth_attr_set(attr, PTH_ATTR_STACK_SIZE, stack_size);
612 /* we could make cancellation asynchronous so it happens immediately,
613 * but making the cancel request pending so that the thread is cancelled
614 * when the thread runs next is considered cleaner */
615 pth_attr_set(attr, PTH_ATTR_CANCEL_STATE,
616 PTH_CANCEL_ENABLE|PTH_CANCEL_ASYNCHRONOUS);
618 /* remember the thread for killing */
619 t = pth_spawn(attr, runthread, thread);
620 if (t == NULL)
622 DEBUGF("Failed to create thread\n");
623 free(stack);
624 return 0;
627 /* enable sane exiting */
628 pth_cleanup_push(thread_cleanup, thread);
630 thread->context.t = t;
631 thread->stack = stack; // pth wants stack on heap
632 thread->stack_size = stack_size;
633 thread->name = name;
634 thread->state = (flags & CREATE_THREAD_FROZEN) ?
635 STATE_FROZEN : STATE_RUNNING;
636 #ifdef HAVE_PRIORITY_SCHEDULING
637 thread->priority = thread->skip_count = thread->base_priority = prio;
638 thread->blocker = NULL;
639 #endif
641 /* sleep until thread_thaw() is called for it */
642 if (thread->state == STATE_FROZEN)
643 pth_suspend(thread->context.t);
645 thread->context.start = function;
647 SIM_THREAD_DEBUGF("New Thread: %ld (%s)\n",
648 thread->id, SIM_THREAD_GET_NAME(thread));
650 return thread->id;
/* Remove a thread from the scheduler.  The current thread exits
 * gracefully via pth_exit(); another thread is unblocked/woken first
 * and then cancelled. */
#ifndef ALLOW_REMOVE_THREAD
static
#endif
void remove_thread(unsigned int thread_id)
{
    struct thread_entry *current = current_thread;
    struct thread_entry *thread = thread_id_entry(thread_id);
    pth_t t;

    /* stale id (slot already recycled)? nothing to do */
    if (thread_id != THREAD_ID_CURRENT && thread->id != thread_id)
        return;

    int oldlevel = disable_irq_save();

    t = thread->context.t;

    if (thread != current)
    {
        switch (thread->state)
        {
        case STATE_BLOCKED:
        case STATE_BLOCKED_W_TMO:
            /* Remove thread from object it's waiting on */
            remove_from_list_l(thread->bqp, thread);

            /* wake the thread up */
            thread->state = STATE_RUNNING;
#ifdef HAVE_WAKEUP_EXT_CB
            if (thread->wakeup_ext_cb != NULL)
                thread->wakeup_ext_cb(thread);
#endif
            break;
        case STATE_SLEEPING:
            /* let the thread sleep until it wakes up, for now */
            switch_thread();
            break;
        }
    }

    SIM_THREAD_DEBUGF("Removing thread: %ld (%s)\n",
                      thread->id, SIM_THREAD_GET_NAME(thread));

    /* passing the full id works: new_thread_id() masks it down to the
     * slot bits itself */
    new_thread_id(thread->id, thread);
    thread->state = STATE_KILLED;
    thread_queue_wake(&thread->queue);

    if (thread == current)
    {
        /* Do a graceful exit */
        restore_irq(oldlevel);
        /* terminate the current thread - it works by returning in the
         * threads entry function */
        pth_exit(NULL);
    }
    else
    {
        pth_cancel(t);
        restore_irq(oldlevel);
    }
}
717 void thread_exit(void)
719 remove_thread(THREAD_ID_CURRENT);
/* Stack usage cannot be measured meaningfully under pth, so report a
 * fixed 50 (percent) for any thread. */
int thread_stack_usage(const struct thread_entry *thread)
{
    (void)thread; /* was placed after the return, i.e. unreachable */
    return 50;
}
730 /* Return name if one or its address if none */
731 void thread_get_name(char *buffer, int size,
732 struct thread_entry *thread)
734 if (size <= 0)
735 return;
737 *buffer = '\0';
739 if (thread)
741 bool named = thread->name && *thread->name;
742 const char *fmt = named ? "%s" : "%08lX";
743 intptr_t name = named ?
744 (intptr_t)thread->name : (intptr_t)thread;
745 snprintf(buffer, size, fmt, name);