/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high priority threads more CPU time than lower priority threads
 * when they need it. Priority is differential such that the priority
 * difference between a lower priority runnable thread and the highest
 * priority runnable thread determines the amount of aging necessary for
 * the lower priority thread to be scheduled in order to prevent starvation.
 *
 * If the software playback codec's PCM buffer is draining to a critical
 * level, the codec can gradually raise its own priority to override the
 * user interface and prevent playback skipping (see the sketch after the
 * priority definitions below).
 */
#define PRIORITY_RESERVED_HIGH   0 /* Reserved */
#define PRIORITY_RESERVED_LOW   32 /* Reserved */
#define HIGHEST_PRIORITY         1 /* The highest possible thread priority */
#define LOWEST_PRIORITY         31 /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1      1
#define PRIORITY_REALTIME_2      2
#define PRIORITY_REALTIME_3      3
#define PRIORITY_REALTIME_4      4
#define PRIORITY_REALTIME        4 /* Lowest realtime range */
#define PRIORITY_BUFFERING      15 /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE 16 /* The main thread */
#define PRIORITY_RECORDING      16 /* Recording thread */
#define PRIORITY_PLAYBACK       16 /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5 /* Maximum allowable playback priority */
#define PRIORITY_SYSTEM         18 /* All other firmware threads */
#define PRIORITY_BACKGROUND     20 /* Normal application threads */
#define NUM_PRIORITIES          32
#define PRIORITY_IDLE           32 /* Priority representative of no tasks */
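
/* Usage sketch (illustrative only, not part of the original header): the
 * playback codec scenario described above can step its own priority toward
 * PRIORITY_PLAYBACK_MAX as the PCM buffer drains, and back toward
 * PRIORITY_PLAYBACK as it refills. Lower numbers mean higher priority;
 * pcmbuf_is_low() is a hypothetical buffer-level check, and the priority
 * calls require HAVE_PRIORITY_SCHEDULING.
 *
 *     int prio = thread_get_priority(THREAD_ID_CURRENT);
 *     if (pcmbuf_is_low() && prio > PRIORITY_PLAYBACK_MAX)
 *         thread_set_priority(THREAD_ID_CURRENT, prio - 1);
 *     else if (!pcmbuf_is_low() && prio < PRIORITY_PLAYBACK)
 *         thread_set_priority(THREAD_ID_CURRENT, prio + 1);
 */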
#if CONFIG_CODEC == SWCODEC

#ifdef HAVE_RECORDING
#define BASETHREADS  17
#else
#define BASETHREADS  16
#endif

#else
#define BASETHREADS  11
#endif /* CONFIG_CODEC == * */

#ifndef TARGET_EXTRA_THREADS
#define TARGET_EXTRA_THREADS 0
#endif

#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)

#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
 * needs them. */
#ifdef CPU_COLDFIRE
struct regs
{
    uint32_t macsr; /*  0 - EMAC status register */
    uint32_t d[6];  /*  4-24 - d2-d7 */
    uint32_t a[5];  /* 28-44 - a2-a6 */
    uint32_t sp;    /* 48 - Stack pointer (a7) */
    uint32_t start; /* 52 - Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
    uint32_t r[7];  /*  0-24 - Registers r8 thru r14 */
    uint32_t sp;    /* 28 - Stack pointer (r15) */
    uint32_t pr;    /* 32 - Procedure register */
    uint32_t start; /* 36 - Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
    uint32_t r[8];  /*  0-28 - Registers r4-r11 */
    uint32_t sp;    /* 32 - Stack pointer (r13) */
    uint32_t lr;    /* 36 - r14 (lr) */
    uint32_t start; /* 40 - Thread start address, or NULL when started */
};
#elif defined(CPU_MIPS)
struct regs
{
    uint32_t r[9];  /*  0-32 - Registers s0-s7, fp */
    uint32_t sp;    /* 36 - Stack pointer */
    uint32_t ra;    /* 40 - Return address */
    uint32_t start; /* 44 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
#else
struct regs
{
    void *t;             /* Simulator OS thread */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};
#endif /* !SIMULATOR */
/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif
/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};
/* Small objects for core-wise mutual exclusion */
#if CONFIG_CORELOCK == SW_CORELOCK
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* Use native atomic swap/exchange instruction */
struct corelock
{
    volatile unsigned char locked;
} __attribute__((packed));

#define corelock_init(cl) \
    ({ (cl)->locked = 0; })
#define corelock_lock(cl) \
    ({ while (test_and_set(&(cl)->locked, 1)); })
#define corelock_try_lock(cl) \
    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
#define corelock_unlock(cl) \
    ({ (cl)->locked = 0; })
#else
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* core locking selection */
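
/* Usage sketch (illustrative only, not part of the original header): a
 * corelock guards a small piece of state shared between cores; it does not
 * disable interrupts. message_cl and message are hypothetical names.
 *
 *     static struct corelock message_cl;
 *     static int message;
 *
 *     corelock_init(&message_cl);
 *     ...
 *     corelock_lock(&message_cl);
 *     message = 42;
 *     corelock_unlock(&message_cl);
 *
 * On builds where no corelock is needed, the calls compile to nothing. */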
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry *thread; /* thread blocking other threads
                                    (aka. object owner) */
    int priority;                /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);

struct priority_distribution
{
    uint8_t  hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t mask;                 /* Bitmask of hist entries that are not zero */
};
#endif /* HAVE_PRIORITY_SCHEDULING */
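
/* Maintenance sketch (illustrative only, inferred from the field comments
 * above rather than copied from the scheduler): adding a thread at priority
 * p bumps hist[p] and marks the mask bit; removal clears the bit when the
 * count reaches zero. The highest active priority is then the numerically
 * lowest set bit, e.g. via find_first_set_bit() (assumed to be the helper
 * from system.h).
 *
 *     pd->hist[p]++;
 *     pd->mask |= 1u << p;
 *     ...
 *     if (--pd->hist[p] == 0)
 *         pd->mask &= ~(1u << p);
 *
 *     int highest = find_first_set_bit(pd->mask);
 */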
/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#if NUM_CORES > 1
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
#define HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                  performs special steps needed when being
                                  forced off of an object's wait queue that
                                  go beyond the standard wait queue removal
                                  and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
    intptr_t retval;           /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
#endif
    uint16_t id;               /* Current slot id */
    unsigned short stack_size; /* Size of stack in bytes */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#if NUM_CORES > 1
    unsigned char core;        /* The core to which thread belongs */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
#endif
};
/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))

/* Specify current thread in a function taking an ID. */
#define THREAD_ID_CURRENT ((unsigned int)-1)
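
/* Layout sketch (illustrative only): the low byte selects the thread slot
 * and the high byte is a version that is bumped each time the slot is
 * reused, so stale IDs stop matching. For slot 3 on first use,
 * THREAD_ID_INIT(3) == 0x0103:
 *
 *     unsigned int slot    = id & THREAD_ID_SLOT_MASK;    // 0x0003
 *     unsigned int version = id & THREAD_ID_VERSION_MASK; // 0x0100
 */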
#if NUM_CORES > 1
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - core is constantly active on these and are never
       locked and interrupts do not access them */
    struct thread_entry *running; /* threads that are running (RTR) */
    struct thread_entry *timeout; /* threads that are on a timeout before
                                     running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check; /* soonest time to check tmo threads */
#if NUM_CORES > 1
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* NUM_CORES */
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)    __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...)   __VA_ARGS__
#endif
/* Macros generate better code than an inline function in this case */
#if (defined (CPU_PP) || defined (CPU_ARM))
/* atomic */
#if CONFIG_CORELOCK == SW_CORELOCK
#define test_and_set(a, v, cl) \
    xchg8((a), (v), (cl))
/* atomic */
#define xchg8(a, v, cl) \
({ uint32_t o;            \
   corelock_lock(cl);     \
   o = *(uint8_t *)(a);   \
   *(uint8_t *)(a) = (v); \
   corelock_unlock(cl);   \
   o; })
#define xchg32(a, v, cl) \
({ uint32_t o;             \
   corelock_lock(cl);      \
   o = *(uint32_t *)(a);   \
   *(uint32_t *)(a) = (v); \
   corelock_unlock(cl);    \
   o; })
#define xchgptr(a, v, cl) \
({ typeof (*(a)) o;     \
   corelock_lock(cl);   \
   o = *(a);            \
   *(a) = (v);          \
   corelock_unlock(cl); \
   o; })
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* atomic */
#define test_and_set(a, v, ...) \
    xchg8((a), (v))
#define xchg8(a, v, ...) \
({ uint32_t o;                \
   asm volatile(              \
       "swpb %0, %1, [%2]"    \
       : "=&r"(o)             \
       : "r"(v),              \
         "r"((uint8_t*)(a))); \
   o; })
/* atomic */
#define xchg32(a, v, ...) \
({ uint32_t o;                 \
   asm volatile(               \
       "swp %0, %1, [%2]"      \
       : "=&r"(o)              \
       : "r"((uint32_t)(v)),   \
         "r"((uint32_t*)(a))); \
   o; })
/* atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o;       \
   asm volatile(          \
       "swp %0, %1, [%2]" \
       : "=&r"(o)         \
       : "r"(v), "r"(a)); \
   o; })
#endif /* locking selection */
#elif defined (CPU_COLDFIRE)
/* atomic */
/* one branch will be optimized away if v is a constant expression */
#define test_and_set(a, v, ...) \
({ uint32_t o = 0;                \
   if (v) {                       \
       asm volatile (             \
           "bset.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   } else {                       \
       asm volatile (             \
           "bclr.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   }                              \
   asm volatile ("sne.b %0"       \
       : "+d"(o));                \
   o; })
#elif CONFIG_CPU == SH7034
/* atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o;                                          \
   asm volatile (                                       \
       "tas.b @%2 \n"                                   \
       "mov #-1, %0 \n"                                 \
       "negc %0, %0 \n"                                 \
       : "=r"(o)                                        \
       : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
         "r"((uint8_t *)(a)));                          \
   o; })
#endif /* CONFIG_CPU == */
/* defaults for no asm version */
#ifndef test_and_set
/* not atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* test_and_set */
#ifndef xchg8
/* not atomic */
#define xchg8(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* xchg8 */
#ifndef xchg32
/* not atomic */
#define xchg32(a, v, ...) \
({ uint32_t o = *(uint32_t *)(a); \
   *(uint32_t *)(a) = (v);        \
   o; })
#endif /* xchg32 */
#ifndef xchgptr
/* not atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o = *(a); \
   *(a) = (v);             \
   o; })
#endif /* xchgptr */
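
/* Usage sketch (illustrative only, not part of the original header):
 * test_and_set returns the previous value, so spinning until it reads back
 * zero forms a minimal busy-wait flag. some_cl is a hypothetical corelock;
 * the trailing argument is only consumed by the SW_CORELOCK variant, and
 * the fallback above is not atomic at all.
 *
 *     static volatile uint8_t flag = 0;
 *
 *     while (test_and_set(&flag, 1, &some_cl))
 *         ;              // spin until we observed 0
 *     ... critical section ...
 *     flag = 0;          // release
 */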
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void);

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));
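
/* Usage sketch (illustrative only, not part of the original header):
 * allocate a static stack, create the thread frozen, then release it with
 * thread_thaw() once any shared state it depends on is ready. demo_thread
 * and demo_stack are hypothetical; HZ and the CPU core ID come from other
 * headers.
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *     static void demo_thread(void)
 *     {
 *         for (;;)
 *             sleep_thread(HZ);
 *     }
 *
 *     unsigned int id =
 *         create_thread(demo_thread, demo_stack, sizeof(demo_stack),
 *                       CREATE_THREAD_FROZEN, "demo"
 *                       IF_PRIO(, PRIORITY_BACKGROUND) IF_COP(, CPU));
 *     ...
 *     thread_thaw(id);
 */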
/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost()
#define cancel_cpu_boost()
#endif
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread that is not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void);
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);
/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);
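
/* Pairing sketch (illustrative only, simplified from how the kernel's queue
 * code uses these calls; real callers also manage corelocks and interrupt
 * state). waiters is a hypothetical list head owned by some kernel object.
 *
 *     // Waiter side:
 *     struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
 *     current->bqp = &waiters;
 *     block_thread(current);
 *     switch_thread();          // yield until woken
 *
 *     // Waker side:
 *     if (wakeup_thread(&waiters) & THREAD_SWITCH)
 *         switch_thread();      // a higher-priority thread was woken
 */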
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
unsigned int thread_get_current(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
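
/* Debug sketch (illustrative only): dump a thread's name and its stack
 * high-water mark (thread_stack_usage() returns a percentage), e.g. from a
 * debug menu. some_id is hypothetical and DEBUGF is assumed from debug.h.
 *
 *     char name[32];
 *     struct thread_entry *t = thread_id_entry(some_id);
 *     thread_get_name(name, sizeof(name), t);
 *     DEBUGF("%s: %d%% stack used\n", name, thread_stack_usage(t));
 */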
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */