/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>

/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high-priority threads more CPU time than lower-priority threads
 * when they need it. Priorities are differential: the difference between a
 * lower-priority runnable thread and the highest-priority runnable thread
 * determines how much aging the lower-priority thread needs before it is
 * scheduled, which prevents starvation.
 *
 * If the software playback codec's pcm buffer is running critically low,
 * the codec can gradually raise its own priority to override the user
 * interface and prevent playback skipping.
 */
#define PRIORITY_RESERVED_HIGH   0   /* Reserved */
#define PRIORITY_RESERVED_LOW    32  /* Reserved */
#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
#define LOWEST_PRIORITY          31  /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1      1
#define PRIORITY_REALTIME_2      2
#define PRIORITY_REALTIME_3      3
#define PRIORITY_REALTIME_4      4
#define PRIORITY_REALTIME        4   /* Lowest realtime range */
#define PRIORITY_BUFFERING       15  /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE  16  /* The main thread */
#define PRIORITY_RECORDING       16  /* Recording thread */
#define PRIORITY_PLAYBACK        16  /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5   /* Maximum allowable playback priority */
#define PRIORITY_SYSTEM          18  /* All other firmware threads */
#define PRIORITY_BACKGROUND      20  /* Normal application threads */
#define NUM_PRIORITIES           32
#define PRIORITY_IDLE            32  /* Priority representative of no tasks */
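
/* Illustrative sketch (not part of the original header): how the playback
 * scenario described above maps onto these values. A codec thread could
 * scale its priority with pcm buffer fill, where "fill_pct" is a
 * hypothetical 0-100 measure and thread_set_priority() and
 * thread_get_current() are declared later in this header:
 *
 *     int prio = PRIORITY_PLAYBACK
 *              - (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX)
 *              * (100 - fill_pct) / 100;
 *     thread_set_priority(thread_get_current(), prio);
 *
 * A full buffer yields PRIORITY_PLAYBACK (16); as it drains the value
 * approaches PRIORITY_PLAYBACK_MAX (5), outranking the UI thread at
 * PRIORITY_USER_INTERFACE, since lower numbers mean higher priority. */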

/* TODO: Only a minor tweak to create_thread would be needed to let
 * thread slots be caller allocated - no essential threading functionality
 * depends upon an array */
#if CONFIG_CODEC == SWCODEC

#ifdef HAVE_RECORDING
#define BASETHREADS  18
#else
#define BASETHREADS  17
#endif

#else
#define BASETHREADS  11
#endif /* CONFIG_CODEC == * */

#ifndef TARGET_EXTRA_THREADS
#define TARGET_EXTRA_THREADS 0
#endif

#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)

#define DEFAULT_STACK_SIZE 0x400 /* Bytes */

#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
 * needs them. */
#ifdef CPU_COLDFIRE
struct regs
{
    uint32_t macsr;  /*  0 - EMAC status register */
    uint32_t d[6];   /*  4-24 - d2-d7 */
    uint32_t a[5];   /* 28-44 - a2-a6 */
    uint32_t sp;     /* 48 - Stack pointer (a7) */
    uint32_t start;  /* 52 - Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
    uint32_t r[7];   /*  0-24 - Registers r8 thru r14 */
    uint32_t sp;     /* 28 - Stack pointer (r15) */
    uint32_t pr;     /* 32 - Procedure register */
    uint32_t start;  /* 36 - Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
    uint32_t r[8];   /*  0-28 - Registers r4-r11 */
    uint32_t sp;     /* 32 - Stack pointer (r13) */
    uint32_t lr;     /* 36 - r14 (lr) */
    uint32_t start;  /* 40 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
#else
struct regs
{
    void *t;             /* Simulator OS thread */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};
#endif /* !SIMULATOR */

/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};

#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif

/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};

/* Small objects for core-wise mutual exclusion */
#if CONFIG_CORELOCK == SW_CORELOCK
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* Use native atomic swap/exchange instruction */
struct corelock
{
    volatile unsigned char locked;
} __attribute__((packed));

#define corelock_init(cl) \
    ({ (cl)->locked = 0; })
#define corelock_lock(cl) \
    ({ while (test_and_set(&(cl)->locked, 1)); })
#define corelock_try_lock(cl) \
    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
#define corelock_unlock(cl) \
    ({ (cl)->locked = 0; })
#else
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* core locking selection */
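
/* Illustrative sketch (not part of the original header): protecting data
 * shared between cores with whichever corelock implementation was selected
 * above. "count_cl" and "shared_count" are hypothetical, and
 * corelock_init() must have been called once on the lock beforehand:
 *
 *     static struct corelock count_cl;
 *     static volatile int shared_count;
 *
 *     void count_increment(void)
 *     {
 *         corelock_lock(&count_cl);
 *         shared_count++;
 *         corelock_unlock(&count_cl);
 *     }
 *
 * On single-core builds the corelock_* calls expand to nothing, so the
 * same code compiles everywhere. */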

#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry *thread; /* thread blocking other threads
                                    (aka. object owner) */
    int priority;                /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);

struct priority_distribution
{
    uint8_t   hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t  mask;                 /* Bitmask of hist entries that are not zero */
};

#endif /* HAVE_PRIORITY_SCHEDULING */
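
/* Illustrative sketch (not part of the original header): maintaining a
 * priority_distribution. A slot's mask bit is set exactly while its
 * histogram count is non-zero, so the highest pending priority is the
 * lowest set bit of the mask (lower number = higher priority). The real
 * bookkeeping lives in the scheduler; these hypothetical helpers only
 * show the invariant:
 *
 *     static inline void prio_add(struct priority_distribution *pd, int p)
 *     {
 *         if (pd->hist[p]++ == 0)
 *             pd->mask |= 1ul << p;
 *     }
 *
 *     static inline void prio_remove(struct priority_distribution *pd, int p)
 *     {
 *         if (--pd->hist[p] == 0)
 *             pd->mask &= ~(1ul << p);
 *     }
 */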

/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#if NUM_CORES > 1
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
#define HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                  performs special steps needed when being
                                  forced off of an object's wait queue that
                                  go beyond the standard wait queue removal
                                  and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
    intptr_t retval;           /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
#endif
    unsigned short stack_size; /* Size of stack in bytes */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#if NUM_CORES > 1
    unsigned char core;        /* The core to which thread belongs */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
#endif
};

#if NUM_CORES > 1
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p;  /* pointer to corelock */
    unsigned char flags;    /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */

/* Information kept for each core -
 * members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - the core works on these constantly; they are never
       locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running (RTR) */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check;           /* soonest time to check tmo threads */
#if NUM_CORES > 1
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* NUM_CORES */
};

#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)  __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif

/* Macros generate better code than an inline function in this case */
#if (defined (CPU_PP) || defined (CPU_ARM))
/* atomic */
#if CONFIG_CORELOCK == SW_CORELOCK
#define test_and_set(a, v, cl) \
    xchg8((a), (v), (cl))
/* atomic */
#define xchg8(a, v, cl) \
({ uint32_t o;            \
   corelock_lock(cl);     \
   o = *(uint8_t *)(a);   \
   *(uint8_t *)(a) = (v); \
   corelock_unlock(cl);   \
   o; })
#define xchg32(a, v, cl) \
({ uint32_t o;             \
   corelock_lock(cl);      \
   o = *(uint32_t *)(a);   \
   *(uint32_t *)(a) = (v); \
   corelock_unlock(cl);    \
   o; })
#define xchgptr(a, v, cl) \
({ typeof (*(a)) o;     \
   corelock_lock(cl);   \
   o = *(a);            \
   *(a) = (v);          \
   corelock_unlock(cl); \
   o; })
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* atomic */
#define test_and_set(a, v, ...) \
    xchg8((a), (v))
#define xchg8(a, v, ...) \
({ uint32_t o;                \
   asm volatile(              \
       "swpb %0, %1, [%2]"    \
       : "=&r"(o)             \
       : "r"(v),              \
         "r"((uint8_t*)(a))); \
   o; })
/* atomic */
#define xchg32(a, v, ...) \
({ uint32_t o;                 \
   asm volatile(               \
       "swp %0, %1, [%2]"      \
       : "=&r"(o)              \
       : "r"((uint32_t)(v)),   \
         "r"((uint32_t*)(a))); \
   o; })
/* atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o;       \
   asm volatile(          \
       "swp %0, %1, [%2]" \
       : "=&r"(o)         \
       : "r"(v), "r"(a)); \
   o; })
#endif /* locking selection */
#elif defined (CPU_COLDFIRE)
/* atomic */
/* one branch will be optimized away if v is a constant expression */
#define test_and_set(a, v, ...) \
({ uint32_t o = 0;                \
   if (v) {                       \
       asm volatile (             \
           "bset.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   } else {                       \
       asm volatile (             \
           "bclr.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   }                              \
   asm volatile ("sne.b %0"       \
       : "+d"(o));                \
   o; })
#elif CONFIG_CPU == SH7034
/* atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o;                 \
   asm volatile (              \
       "tas.b   @%2     \n"    \
       "mov     #-1, %0 \n"    \
       "negc    %0, %0  \n"    \
       : "=r"(o)               \
       : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
         "r"((uint8_t *)(a))); \
   o; })
#endif /* CONFIG_CPU == */

/* defaults for no asm version */
#ifndef test_and_set
/* not atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* test_and_set */
#ifndef xchg8
/* not atomic */
#define xchg8(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* xchg8 */
#ifndef xchg32
/* not atomic */
#define xchg32(a, v, ...) \
({ uint32_t o = *(uint32_t *)(a); \
   *(uint32_t *)(a) = (v);        \
   o; })
#endif /* xchg32 */
#ifndef xchgptr
/* not atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o = *(a); \
   *(a) = (v);             \
   o; })
#endif /* xchgptr */

void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void);

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, size_t stack_size,
                  unsigned flags, const char *name
                  IF_PRIO(, int priority)
                  IF_COP(, unsigned int core));
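
/* Illustrative sketch (not part of the original header): creating a
 * thread with a statically allocated stack. "demo_thread", its stack and
 * do_demo_work() are hypothetical; the IF_PRIO/IF_COP argument wrappers
 * only expand on builds that define the corresponding features, and CPU
 * naming the main core is assumed from the target's config:
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *     static void demo_thread(void)
 *     {
 *         do_demo_work();
 *         thread_exit();
 *     }
 *
 *     struct thread_entry *t =
 *         create_thread(demo_thread, demo_stack, sizeof(demo_stack),
 *                       0, "demo"
 *                       IF_PRIO(, PRIORITY_BACKGROUND)
 *                       IF_COP(, CPU));
 *
 * A NULL return would indicate that no free slot of the MAXTHREADS
 * array was available. */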

/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost()
#define cancel_cpu_boost()
#endif
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread that is not frozen. */
void thread_thaw(struct thread_entry *thread);
/* Wait for a thread to exit */
void thread_wait(struct thread_entry *thread);
/* Exit the current thread */
void thread_exit(void);
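
/* Illustrative sketch (not part of the original header): the
 * freeze/thaw/wait lifecycle implied by the declarations above. A thread
 * created with CREATE_THREAD_FROZEN stays in STATE_FROZEN until
 * thread_thaw() releases it, which lets setup finish before it runs.
 * Here "t" is a thread created as in the create_thread example and
 * finish_setup() is hypothetical:
 *
 *     finish_setup();    <- t cannot run yet, so setup is race-free
 *     thread_thaw(t);    <- make it runnable
 *     thread_wait(t);    <- block until it calls thread_exit()
 */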
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(struct thread_entry *thread);
#endif

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);

/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);
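
/* Illustrative sketch (not part of the original header, and hedged: the
 * exact locking that must surround these calls is defined by the kernel
 * code, not shown here): the pairing that the THREAD_* flags describe.
 * A waiter parks itself on a list head via its bqp pointer; a waker pulls
 * the head off the same list and acts on the returned flags:
 *
 *     static struct thread_entry *waiters = NULL;
 *
 *     Waiter:
 *         current->bqp = &waiters;
 *         block_thread(current);
 *
 *     Waker:
 *         if (wakeup_thread(&waiters) & THREAD_SWITCH)
 *             switch_thread();
 */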

#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);
#endif /* HAVE_PRIORITY_SCHEDULING */

#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
struct thread_entry * thread_get_current(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */