firmware/export/thread.h
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high priority threads more CPU time than lower priority threads
 * when they need it. Priority is differential such that the priority
 * difference between a lower priority runnable thread and the highest priority
 * runnable thread determines the amount of aging necessary for the lower
 * priority thread to be scheduled in order to prevent starvation.
 *
 * If the software playback codec's PCM buffer is running critically low, the
 * codec can gradually raise its own priority to override the user interface
 * and prevent playback skipping (see the illustrative sketch after the
 * priority defines below).
 */
#define PRIORITY_RESERVED_HIGH   0   /* Reserved */
#define PRIORITY_RESERVED_LOW    32  /* Reserved */
#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
#define LOWEST_PRIORITY          31  /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1      1
#define PRIORITY_REALTIME_2      2
#define PRIORITY_REALTIME_3      3
#define PRIORITY_REALTIME_4      4
#define PRIORITY_REALTIME        4   /* Lowest realtime range */
#define PRIORITY_BUFFERING       15  /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE  16  /* The main thread */
#define PRIORITY_RECORDING       16  /* Recording thread */
#define PRIORITY_PLAYBACK        16  /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5   /* Maximum allowable playback priority */
#define PRIORITY_SYSTEM          18  /* All other firmware threads */
#define PRIORITY_BACKGROUND      20  /* Normal application threads */
#define NUM_PRIORITIES           32
#define PRIORITY_IDLE            32  /* Priority representative of no tasks */

#define IO_PRIORITY_IMMEDIATE    0
#define IO_PRIORITY_BACKGROUND   32
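
/* Illustrative sketch (not part of this header): a playback codec thread could
 * use the range above to trade CPU time against the UI as its PCM buffer
 * drains. pcm_buffer_fill_percent() is a hypothetical helper standing in for
 * "how full is the output buffer"; thread_set_priority() is declared further
 * down and only exists with HAVE_PRIORITY_SCHEDULING.
 *
 *   static void codec_update_priority(unsigned int codec_thread_id)
 *   {
 *       int fill = pcm_buffer_fill_percent();  // 0..100, hypothetical
 *       int prio = PRIORITY_PLAYBACK;          // relaxed default (16)
 *       if (fill < 25)
 *           // Scale toward PRIORITY_PLAYBACK_MAX (5) as the buffer empties
 *           prio = PRIORITY_PLAYBACK_MAX +
 *                  (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX) * fill / 25;
 *       thread_set_priority(codec_thread_id, prio);
 *   }
 */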
#if CONFIG_CODEC == SWCODEC

#ifdef HAVE_RECORDING
#define BASETHREADS  17
#else
#define BASETHREADS  16
#endif

#else
#define BASETHREADS  11
#endif /* CONFIG_CODEC == * */

#ifndef TARGET_EXTRA_THREADS
#define TARGET_EXTRA_THREADS 0
#endif

#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
/*
 * We need more stack when we run under a host - maybe because more expensive
 * C library functions are used? The simulator (possibly) doesn't simulate
 * stack usage anyway, but well... */
#ifdef HAVE_SIGALTSTACK_THREADS
#include <signal.h>
/* MINSIGSTKSZ for the OS to deliver the signal + 0x3000 for us */
#define DEFAULT_STACK_SIZE (MINSIGSTKSZ+0x3000) /* Bytes */
#elif (CONFIG_PLATFORM & PLATFORM_ANDROID) || defined(HAVE_WIN32_FIBER_THREADS)
#define DEFAULT_STACK_SIZE 0x1000 /* Bytes */
#else /* native threads, sdl threads */
#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
#endif
#if defined(ASSEMBLER_THREADS)
/* Need to keep structures inside the header file because debug_menu
 * needs them. */
#ifdef CPU_COLDFIRE
struct regs
{
    uint32_t macsr;  /*     0 - EMAC status register */
    uint32_t d[6];   /*  4-24 - d2-d7 */
    uint32_t a[5];   /* 28-44 - a2-a6 */
    uint32_t sp;     /*    48 - Stack pointer (a7) */
    uint32_t start;  /*    52 - Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
    uint32_t r[7];   /*  0-24 - Registers r8 thru r14 */
    uint32_t sp;     /*    28 - Stack pointer (r15) */
    uint32_t pr;     /*    32 - Procedure register */
    uint32_t start;  /*    36 - Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
    uint32_t r[8];   /*  0-28 - Registers r4-r11 */
    uint32_t sp;     /*    32 - Stack pointer (r13) */
    uint32_t lr;     /*    36 - r14 (lr) */
    uint32_t start;  /*    40 - Thread start address, or NULL when started */
};
#elif defined(CPU_MIPS)
struct regs
{
    uint32_t r[9];   /*  0-32 - Registers s0-s7, fp */
    uint32_t sp;     /*    36 - Stack pointer */
    uint32_t ra;     /*    40 - Return address */
    uint32_t start;  /*    44 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
#elif (CONFIG_PLATFORM & PLATFORM_HOSTED) || defined(__PCTOOL__)
#ifndef HAVE_SDL_THREADS
struct regs
{
    void (*start)(void); /* thread's entry point, or NULL when started */
    void* uc;            /* host thread handle */
    uintptr_t sp;        /* Stack pointer, unused */
    size_t stack_size;   /* stack size, not always used */
    uintptr_t stack;     /* pointer to start of the stack buffer */
};
#else /* SDL threads */
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};
#endif
#endif /* PLATFORM_NATIVE */
#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */
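
/* Illustrative sketch (not part of this header): typical use of a corelock to
 * guard a small structure shared between cores. shared_counter and counter_cl
 * are hypothetical; on builds without HAVE_CORELOCK_OBJECT the corelock_*
 * calls compile away to nothing (see the empty macros below).
 *
 *   static struct corelock counter_cl;  // corelock_init(&counter_cl) once
 *   static volatile int shared_counter;
 *
 *   static void counter_increment(void)
 *   {
 *       corelock_lock(&counter_cl);     // spin using Peterson's algorithm
 *       shared_counter++;
 *       corelock_unlock(&counter_cl);
 *   }
 */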
/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily the kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif
/* Link information for lists the thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};
#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry * volatile thread; /* thread blocking other threads
                                              (aka. object owner) */
    int priority;                          /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);

struct priority_distribution
{
    uint8_t  hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t mask;                 /* Bitmask of hist entries that are not zero */
};

#endif /* HAVE_PRIORITY_SCHEDULING */
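
/* Illustrative sketch (not part of this header): how a priority_distribution
 * is typically maintained. Adding a thread of priority p bumps hist[p] and
 * sets bit p in the mask; removing it does the reverse, and the best effective
 * priority is then the lowest set bit. This is only a rough sketch of the
 * bookkeeping, not the scheduler's actual helpers.
 *
 *   static void pdist_add(struct priority_distribution *pd, int p)
 *   {
 *       if (pd->hist[p]++ == 0)
 *           pd->mask |= 1u << p;    // first entry at this priority
 *   }
 *
 *   static void pdist_remove(struct priority_distribution *pd, int p)
 *   {
 *       if (--pd->hist[p] == 0)
 *           pd->mask &= ~(1u << p); // last entry at this priority gone
 *   }
 */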
/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;        /* Register context at switch -
                                   _must_ be first member */
    uintptr_t *stack;           /* Pointer to top of stack */
    const char *name;           /* Thread name */
    long tmo_tick;              /* Tick when thread should be woken from
                                   timeout -
                                   states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;       /* Links for blocked/waking/running -
                                   circular linkage in both directions */
    struct thread_list tmo;     /* Links for timeout list -
                                   Circular in reverse direction, NULL-terminated in
                                   forward direction -
                                   states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp;  /* Pointer to list variable in kernel
                                   object where thread is blocked - used
                                   for implicit unblock and explicit wake
                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock *obj_cl;    /* Object corelock where thread is blocked -
                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct corelock waiter_cl;  /* Corelock for thread_wait */
    struct corelock slot_cl;    /* Corelock to lock thread slot */
    unsigned char core;         /* The core to which thread belongs */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                   performs special steps needed when being
                                   forced off of an object's wait queue that
                                   go beyond the standard wait queue removal
                                   and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
    NUM_CORES > 1
    volatile intptr_t retval;   /* Return value from a blocked operation/
                                   misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;    /* Pointer to blocker when this thread is blocked
                                   on an object that supports PIP -
                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                   that have blocked threads and thread's own
                                   base priority */
    int skip_count;             /* Number of times skipped if higher priority
                                   thread was running */
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;     /* Scheduled priority (higher of base or
                                   all threads blocked by this one) */
#endif
    uint16_t id;                /* Current slot id */
    unsigned short stack_size;  /* Size of stack in bytes */
    unsigned char state;        /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;    /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority;
#endif
};
/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))

/* Specify current thread in a function taking an ID. */
#define THREAD_ID_CURRENT ((unsigned int)-1)
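
/* Illustrative example (not part of this header): how a thread ID splits into
 * a slot number and a version. For slot 3, THREAD_ID_INIT(3) yields 0x0103;
 * each reuse of the slot bumps the version byte so stale IDs can be detected.
 *
 *   unsigned int id   = THREAD_ID_INIT(3);                  // 0x0103
 *   unsigned int slot = id & THREAD_ID_SLOT_MASK;           // 3
 *   unsigned int ver  = (id & THREAD_ID_VERSION_MASK)
 *                           >> THREAD_ID_VERSION_SHIFT;     // 1
 */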
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - the core is constantly active on these; they are never
       locked and interrupts do not access them */
    struct thread_entry *running;    /* threads that are running (RTR) */
    struct thread_entry *timeout;    /* threads that are on a timeout before
                                        running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check;             /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
    struct thread_blk_ops blk_ops;   /* operations to perform when
                                        blocking a thread */
    struct corelock rtr_cl;          /* Lock for rtr list */
#endif /* NUM_CORES */
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)  __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif
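
/* Illustrative note (not part of this header): these wrappers let a single
 * prototype serve both configurations. With HAVE_PRIORITY_SCHEDULING defined,
 *
 *   unsigned int create_thread(..., const char *name IF_PRIO(, int priority) ...);
 *
 * expands so that ", int priority" is present; without it, the parameter
 * vanishes from the signature. IFN_PRIO() is the inverse, for code that should
 * only exist when priority scheduling is compiled out. */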
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));
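
/* Illustrative sketch (not part of this header): creating a simple thread.
 * demo_thread and demo_stack are hypothetical, HZ is assumed to come from the
 * kernel headers, and CPU is assumed to name the main core on multi-core
 * builds; the IF_PRIO()/IF_COP() wrappers make the same call match whichever
 * prototype the build selects.
 *
 *   static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *   static void demo_thread(void)
 *   {
 *       for (;;)
 *       {
 *           sleep_thread(HZ);    // queue a ~1 second timeout...
 *           switch_thread();     // ...and give up the CPU until it expires
 *       }
 *   }
 *
 *   unsigned int id = create_thread(demo_thread,
 *                                   demo_stack, sizeof(demo_stack),
 *                                   0, "demo"
 *                                   IF_PRIO(, PRIORITY_BACKGROUND)
 *                                   IF_COP(, CPU));
 */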
/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
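
/* Illustrative sketch (not part of this header): a thread that needs a burst
 * of CPU speed brackets the work with these calls; on targets without
 * HAVE_SCHEDULER_BOOSTCTRL both compile away. do_heavy_work() is a
 * hypothetical placeholder.
 *
 *   trigger_cpu_boost();   // raise this thread's boost request
 *   do_heavy_work();
 *   cancel_cpu_boost();    // drop the request again when done
 */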
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread that is not frozen. */
void thread_thaw(unsigned int thread_id);
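
/* Illustrative sketch (not part of this header): pairing thread_thaw() with
 * CREATE_THREAD_FROZEN, reusing the hypothetical demo_thread/demo_stack from
 * the create_thread sketch above:
 *
 *   unsigned int id = create_thread(demo_thread,
 *                                   demo_stack, sizeof(demo_stack),
 *                                   CREATE_THREAD_FROZEN, "demo"
 *                                   IF_PRIO(, PRIORITY_BACKGROUND)
 *                                   IF_COP(, CPU));
 *   ...                             // set up anything the thread depends on
 *   thread_thaw(id);                // only now does it become runnable
 */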
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif
/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);

/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);
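
/* Illustrative sketch (not part of this header): the usual shape of a kernel
 * object built on these primitives. A blocking wait parks the current thread
 * on the object's list; a post from elsewhere wakes the head of that list.
 * struct my_object is hypothetical, and real objects additionally disable
 * interrupts and take corelocks around these steps.
 *
 *   struct my_object
 *   {
 *       struct thread_entry *queue;  // head of the list of blocked threads
 *   };
 *
 *   static void my_object_wait(struct my_object *obj)
 *   {
 *       struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
 *       current->bqp = &obj->queue;  // which list the scheduler blocks us on
 *       block_thread(current);       // state becomes STATE_BLOCKED
 *       switch_thread();             // actually give up the CPU
 *   }
 *
 *   static void my_object_post(struct my_object *obj)
 *   {
 *       unsigned int result = wakeup_thread(&obj->queue);
 *       if (result & THREAD_SWITCH)  // a higher-priority thread was woken
 *           switch_thread();
 *   }
 */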
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
unsigned int thread_get_current(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
#endif /* THREAD_H */