/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    <  | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>

/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high priority threads more CPU time than lower priority threads
 * when they need it. Priority is differential such that the priority
 * difference between a lower priority runnable thread and the highest priority
 * runnable thread determines the amount of aging necessary for the lower
 * priority thread to be scheduled in order to prevent starvation.
 *
 * If the software playback codec's pcm buffer is running down to critical, the
 * codec can gradually raise its own priority to override the user interface
 * and prevent playback skipping.
 */
#define PRIORITY_RESERVED_HIGH  0 /* Reserved */
#define PRIORITY_RESERVED_LOW  32 /* Reserved */
#define HIGHEST_PRIORITY        1 /* The highest possible thread priority */
#define LOWEST_PRIORITY        31 /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1     1
#define PRIORITY_REALTIME_2     2
#define PRIORITY_REALTIME_3     3
#define PRIORITY_REALTIME_4     4
#define PRIORITY_REALTIME       4 /* Lowest realtime range */
#define PRIORITY_USER_INTERFACE 16 /* The main thread */
#define PRIORITY_RECORDING      16 /* Recording thread */
#define PRIORITY_PLAYBACK       16 /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5 /* Maximum allowable playback priority */
#define PRIORITY_BUFFERING      16 /* Codec buffering thread */
#define PRIORITY_SYSTEM         18 /* All other firmware threads */
#define PRIORITY_BACKGROUND     20 /* Normal application threads */
#define NUM_PRIORITIES          32
#define PRIORITY_IDLE           32 /* Priority representative of no tasks */
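
/* For example (a sketch, not actual playback code): the playback codec can
 * walk its priority from PRIORITY_PLAYBACK toward PRIORITY_PLAYBACK_MAX as a
 * hypothetical `pcm_fill_percent` drains, using thread_set_priority() from
 * this header. Lower numbers mean higher priority:
 *
 *     int prio = PRIORITY_PLAYBACK;
 *     if (pcm_fill_percent < 25)
 *         prio = PRIORITY_PLAYBACK_MAX +
 *                (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX) *
 *                    pcm_fill_percent / 25;
 *     thread_set_priority(thread_get_current(), prio);
 */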
/* TODO: Only a minor tweak to create_thread would be needed to let
 * thread slots be caller allocated - no essential threading functionality
 * depends upon an array */
#if CONFIG_CODEC == SWCODEC

#ifdef HAVE_RECORDING
#define MAXTHREADS  18
#else
#define MAXTHREADS  17
#endif

#else
#define MAXTHREADS  11
#endif /* CONFIG_CODEC == * */

#define DEFAULT_STACK_SIZE 0x400 /* Bytes */

#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
 * needs them. */
#ifdef CPU_COLDFIRE
struct regs
{
    uint32_t macsr; /*  0 - EMAC status register */
    uint32_t d[6];  /*  4-24 - d2-d7 */
    uint32_t a[5];  /* 28-44 - a2-a6 */
    uint32_t sp;    /* 48 - Stack pointer (a7) */
    uint32_t start; /* 52 - Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
    uint32_t r[7];  /*  0-24 - Registers r8 thru r14 */
    uint32_t sp;    /* 28 - Stack pointer (r15) */
    uint32_t pr;    /* 32 - Procedure register */
    uint32_t start; /* 36 - Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
    uint32_t r[8];  /*  0-28 - Registers r4-r11 */
    uint32_t sp;    /* 32 - Stack pointer (r13) */
    uint32_t lr;    /* 36 - r14 (lr) */
    uint32_t start; /* 40 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
#else
struct regs
{
    void *t;             /* Simulator OS thread */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};
#endif /* !SIMULATOR */

/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily the kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};

#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif

/* Link information for lists the thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};

/* Small objects for core-wise mutual exclusion */
#if CONFIG_CORELOCK == SW_CORELOCK
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
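
/* For reference, a simplified two-core sketch of what corelock_lock() does
 * under Peterson's algorithm; the real routines live in the target sources
 * and may be hand-optimized. CURRENT_CORE is assumed to give the calling
 * core's index:
 *
 *     void corelock_lock(struct corelock *cl)
 *     {
 *         const unsigned int core = CURRENT_CORE;
 *         const unsigned int other = 1 - core;
 *         cl->myl[core] = core + 1;   // declare interest (nonzero)
 *         cl->turn = other;           // let the other core go first
 *         while (cl->myl[other] != 0 && cl->turn == other);
 *     }
 *
 * corelock_unlock() then just clears myl[core]. */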
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* Use native atomic swap/exchange instruction */
struct corelock
{
    volatile unsigned char locked;
} __attribute__((packed));

#define corelock_init(cl) \
    ({ (cl)->locked = 0; })
#define corelock_lock(cl) \
    ({ while (test_and_set(&(cl)->locked, 1)); })
#define corelock_try_lock(cl) \
    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
#define corelock_unlock(cl) \
    ({ (cl)->locked = 0; })
#else
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* core locking selection */

#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry *thread; /* thread blocking other threads
                                    (aka. object owner) */
    int priority;                /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);
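
/* A sketch of how a kernel object wires a blocker up; `mutex_sketch` is
 * illustrative only, not the kernel's actual mutex definition:
 *
 *     struct mutex_sketch
 *     {
 *         struct thread_entry *queue;  // waiters blocked on this object
 *         struct blocker blocker;
 *     };
 *
 *     void mutex_sketch_init(struct mutex_sketch *m)
 *     {
 *         m->queue = NULL;
 *         m->blocker.thread = NULL;             // no owner yet
 *         m->blocker.priority = PRIORITY_IDLE;  // no waiters yet
 *         m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
 *     }
 */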
struct priority_distribution
{
    uint8_t  hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t mask;                 /* Bitmask of hist entries that are not zero */
};
#endif /* HAVE_PRIORITY_SCHEDULING */
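
/* Keeping the pair consistent is cheap; a sketch of the add/remove steps
 * (the scheduler's internal helpers may differ in detail):
 *
 *     static inline void prio_add(struct priority_distribution *pd, int p)
 *     {
 *         if (++pd->hist[p] == 1)
 *             pd->mask |= 1u << p;    // first entry at this priority
 *     }
 *
 *     static inline void prio_remove(struct priority_distribution *pd, int p)
 *     {
 *         if (--pd->hist[p] == 0)
 *             pd->mask &= ~(1u << p); // last entry at this priority
 *     }
 *
 * The highest pending priority is then the lowest set bit in pd->mask. */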
/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#if NUM_CORES > 1
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
#define HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                  performs special steps needed when being
                                  forced off of an object's wait queue that
                                  go beyond the standard wait queue removal
                                  and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
    intptr_t retval;           /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
#endif
    unsigned short stack_size; /* Size of stack in bytes */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#if NUM_CORES > 1
    unsigned char core;        /* The core to which thread belongs */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
#endif
};

#if NUM_CORES > 1
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */

/* Information kept for each core -
 * members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - core is constantly active on these and they are never
       locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running (RTR) */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check;           /* soonest time to check tmo threads */
#if NUM_CORES > 1
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif
#endif /* NUM_CORES */
};

#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)    __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...)   __VA_ARGS__
#endif

/* Macros generate better code than an inline function in this case */
#if (defined (CPU_PP) || defined (CPU_ARM))
/* atomic */
#if CONFIG_CORELOCK == SW_CORELOCK
#define test_and_set(a, v, cl) \
    xchg8((a), (v), (cl))
/* atomic */
#define xchg8(a, v, cl) \
({ uint32_t o;            \
   corelock_lock(cl);     \
   o = *(uint8_t *)(a);   \
   *(uint8_t *)(a) = (v); \
   corelock_unlock(cl);   \
   o; })
#define xchg32(a, v, cl) \
({ uint32_t o;             \
   corelock_lock(cl);      \
   o = *(uint32_t *)(a);   \
   *(uint32_t *)(a) = (v); \
   corelock_unlock(cl);    \
   o; })
#define xchgptr(a, v, cl) \
({ typeof (*(a)) o;     \
   corelock_lock(cl);   \
   o = *(a);            \
   *(a) = (v);          \
   corelock_unlock(cl); \
   o; })
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* atomic */
#define test_and_set(a, v, ...) \
    xchg8((a), (v))
#define xchg8(a, v, ...) \
({ uint32_t o;                \
   asm volatile(              \
       "swpb %0, %1, [%2]"    \
       : "=&r"(o)             \
       : "r"(v),              \
         "r"((uint8_t*)(a))); \
   o; })
/* atomic */
#define xchg32(a, v, ...) \
({ uint32_t o;                 \
   asm volatile(               \
       "swp %0, %1, [%2]"      \
       : "=&r"(o)              \
       : "r"((uint32_t)(v)),   \
         "r"((uint32_t*)(a))); \
   o; })
/* atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o;       \
   asm volatile(          \
       "swp %0, %1, [%2]" \
       : "=&r"(o)         \
       : "r"(v), "r"(a)); \
   o; })
#endif /* locking selection */
#elif defined (CPU_COLDFIRE)
/* atomic */
/* one branch will be optimized away if v is a constant expression */
#define test_and_set(a, v, ...) \
({ uint32_t o = 0;                \
   if (v) {                       \
       asm volatile (             \
           "bset.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   } else {                       \
       asm volatile (             \
           "bclr.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   }                              \
   asm volatile ("sne.b %0"       \
       : "+d"(o));                \
   o; })
#elif CONFIG_CPU == SH7034
/* atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o;                 \
   asm volatile (              \
       "tas.b   @%2     \n"    \
       "mov     #-1, %0 \n"    \
       "negc    %0, %0  \n"    \
       : "=r"(o)               \
       : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
         "r"((uint8_t *)(a))); \
   o; })
#endif /* CONFIG_CPU == */

/* defaults for no asm version */
#ifndef test_and_set
/* not atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* test_and_set */
#ifndef xchg8
/* not atomic */
#define xchg8(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* xchg8 */
#ifndef xchg32
/* not atomic */
#define xchg32(a, v, ...) \
({ uint32_t o = *(uint32_t *)(a); \
   *(uint32_t *)(a) = (v);        \
   o; })
#endif /* xchg32 */
#ifndef xchgptr
/* not atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o = *(a); \
   *(a) = (v);             \
   o; })
#endif /* xchgptr */
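
/* All variants accept trailing arguments so call sites can pass a corelock
 * unconditionally; only the SW_CORELOCK build consumes it. A sketch of a
 * call site, with `flag` and `flag_cl` as illustrative names:
 *
 *     static uint8_t flag;
 *     static struct corelock flag_cl;  // only used under SW_CORELOCK
 *
 *     if (test_and_set(&flag, 1, &flag_cl) == 0)
 *     {
 *         // this caller is the one that flipped the flag from 0 to 1
 *     }
 */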
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void);

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, size_t stack_size,
                  unsigned flags, const char *name
                  IF_PRIO(, int priority)
                  IF_COP(, unsigned int core));
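
/* A sketch of typical usage; `demo_thread` and `demo_stack` are illustrative
 * names supplied by the caller:
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *     static void demo_thread(void)
 *     {
 *         // ... do work ...
 *         thread_exit();
 *     }
 *
 *     struct thread_entry *t =
 *         create_thread(demo_thread, demo_stack, sizeof(demo_stack),
 *                       CREATE_THREAD_FROZEN, "demo"
 *                       IF_PRIO(, PRIORITY_BACKGROUND)
 *                       IF_COP(, CPU));
 *     thread_thaw(t);  // frozen threads run only after this (see below)
 *
 * IF_PRIO()/IF_COP() expand to nothing on builds without priority scheduling
 * or a second core, so the same call compiles everywhere. */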
/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost()
#define cancel_cpu_boost()
#endif
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread that is not frozen. */
void thread_thaw(struct thread_entry *thread);
/* Wait for a thread to exit */
void thread_wait(struct thread_entry *thread);
/* Exit the current thread */
void thread_exit(void);
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(struct thread_entry *thread);
#endif

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);

/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);
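
/* The flags compose, so a typical caller wakes the whole queue and yields
 * if anything more important became runnable - a sketch, where `list` is
 * the same pointer the threads blocked on via block_thread():
 *
 *     if (thread_queue_wake(&list) & THREAD_SWITCH)
 *         switch_thread();
 */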
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);
#endif /* HAVE_PRIORITY_SCHEDULING */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
struct thread_entry * thread_get_current(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */