firmware/export/thread.h
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                      \/            \/     \/    \/           \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high-priority threads more CPU time than lower-priority threads
 * when they need it.
 *
 * If the software playback codec's PCM buffer is running critically low, the
 * codec can raise its own priority to REALTIME to override the user interface
 * and prevent playback from skipping.
 */
#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
#define LOWEST_PRIORITY        100   /* The lowest possible thread priority */
#define PRIORITY_REALTIME        1
#define PRIORITY_USER_INTERFACE  4   /* The main thread */
#define PRIORITY_RECORDING       4   /* Recording thread */
#define PRIORITY_PLAYBACK        4   /* or REALTIME when needed */
#define PRIORITY_BUFFERING       4   /* Codec buffering thread */
#define PRIORITY_SYSTEM          6   /* All other firmware threads */
#define PRIORITY_BACKGROUND      8   /* Normal application threads */
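
/* Usage sketch (illustrative only, not part of this header): a playback
 * codec thread bumping its own priority when the PCM buffer runs low and
 * dropping back afterwards, via thread_set_priority() declared further down
 * in this file. pcm_buffer_critical() is a hypothetical placeholder check,
 * not a real Rockbox function.
 *
 *     void codec_adjust_priority(void)
 *     {
 *         struct thread_entry *self = thread_get_current();
 *
 *         if (pcm_buffer_critical())
 *             thread_set_priority(self, PRIORITY_REALTIME);
 *         else
 *             thread_set_priority(self, PRIORITY_PLAYBACK);
 *     }
 */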
/* TODO: Only a minor tweak to create_thread would be needed to let
 * thread slots be caller allocated - no essential threading functionality
 * depends upon an array */
#if CONFIG_CODEC == SWCODEC

#ifdef HAVE_RECORDING
#define MAXTHREADS  18
#else
#define MAXTHREADS  17
#endif

#else
#define MAXTHREADS  11
#endif /* CONFIG_CODEC == * */

#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
/**
 * "Busy" values that can be swapped into a variable to indicate
 * that the variable or object pointed to is in use by another processor
 * core. When accessed, the busy value is swapped in while the current
 * value is atomically returned. If the swap returns the busy value,
 * the processor should retry the operation until some other value is
 * returned. When modification is finished, the new value should be
 * written, which unlocks it and updates it atomically.
 *
 * Procedure:
 * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
 *
 * Modify/examine object at mem location or variable. Create "new_value"
 * as suitable.
 *
 * variable = new_value or curr_value;
 *
 * To check a value for busy and perform an operation if not:
 * curr_value = swap(&variable, BUSY_VALUE);
 *
 * if (curr_value != BUSY_VALUE)
 * {
 *     Modify/examine object at mem location or variable. Create "new_value"
 *     as suitable.
 *     variable = new_value or curr_value;
 * }
 * else
 * {
 *     Do nothing - already busy
 * }
 *
 * Only ever restore when an actual value is returned, or else the variable
 * could be left locked permanently if another processor unlocked it in the
 * meantime; the next access attempt would then deadlock for all processors
 * since an abandoned busy status would be left behind.
 */
#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
#define STATE_BUSYu8   UINT8_MAX
#define STATE_BUSYi    INT_MIN
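
/* Usage sketch (illustrative only, not part of this header): claiming a
 * shared pointer with the busy-swap protocol above, using the xchgptr()
 * macro defined later in this file. shared_ptr, shared_cl and new_value are
 * hypothetical names for the example; the corelock argument is only needed
 * for the SW_CORELOCK configuration and is ignored otherwise.
 *
 *     void *old;
 *
 *     // spin until another core releases the variable
 *     while ((old = xchgptr(&shared_ptr, STATE_BUSYuptr, &shared_cl))
 *            == STATE_BUSYuptr);
 *
 *     // examine/modify the object, then write back an actual value,
 *     // which also releases it:
 *     shared_ptr = new_value;   // or restore: shared_ptr = old;
 */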
#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
 * needs them. */
#ifdef CPU_COLDFIRE
struct regs
{
    unsigned int macsr;  /*     0 - EMAC status register */
    unsigned int d[6];   /*  4-24 - d2-d7 */
    unsigned int a[5];   /* 28-44 - a2-a6 */
    void         *sp;    /*    48 - Stack pointer (a7) */
    void         *start; /*    52 - Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
    unsigned int r[7];   /*  0-24 - Registers r8 thru r14 */
    void         *sp;    /*    28 - Stack pointer (r15) */
    void         *pr;    /*    32 - Procedure register */
    void         *start; /*    36 - Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
    unsigned int r[8];   /*  0-28 - Registers r4-r11 */
    void         *sp;    /*    32 - Stack pointer (r13) */
    unsigned int lr;     /*    36 - r14 (lr) */
    void         *start; /*    40 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
#else
struct regs
{
    void *t;             /* Simulator OS thread */
    void *c;             /* Condition for blocking and sync */
    void (*start)(void); /* Start function */
};
#endif /* !SIMULATOR */
/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order,
   and not necessarily the kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
#if NUM_CORES > 1
    STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
#endif
};
#if NUM_CORES > 1
#define THREAD_DESTRUCT ((const char *)0x84905617)
#endif
/* Link information for lists the thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};
/* Small objects for core-wise mutual exclusion */
#if CONFIG_CORELOCK == SW_CORELOCK
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* Use native atomic swap/exchange instruction */
struct corelock
{
    unsigned char locked;
} __attribute__((packed));

#define corelock_init(cl) \
    ({ (cl)->locked = 0; })
#define corelock_lock(cl) \
    ({ while (test_and_set(&(cl)->locked, 1)); })
#define corelock_try_lock(cl) \
    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
#define corelock_unlock(cl) \
    ({ (cl)->locked = 0; })
#else
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* core locking selection */
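
/* Usage sketch (illustrative only, not part of this header): serializing
 * access to a small cross-core counter with a corelock. shared_cl and
 * shared_count are hypothetical names; this assumes a configuration where
 * struct corelock is defined (SW_CORELOCK or CORELOCK_SWAP). In the
 * no-corelock case the lock/unlock macros expand to nothing.
 *
 *     static struct corelock shared_cl;   // corelock_init(&shared_cl) once
 *     static int shared_count;
 *
 *     void shared_count_inc(void)
 *     {
 *         corelock_lock(&shared_cl);      // spins until the other core unlocks
 *         shared_count++;
 *         corelock_unlock(&shared_cl);
 *     }
 */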
struct thread_queue
{
    struct thread_entry *queue; /* list of threads waiting -
                                   _must_ be first member */
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock cl;         /* lock for atomic list operations */
#endif
};
/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;        /* Register context at switch -
                                   _must_ be first member */
    void *stack;                /* Pointer to top of stack */
    const char *name;           /* Thread name */
    long tmo_tick;              /* Tick when thread should be woken from
                                   timeout */
    struct thread_list l;       /* Links for blocked/waking/running -
                                   circular linkage in both directions */
    struct thread_list tmo;     /* Links for timeout list -
                                   Self-pointer-terminated in reverse direction,
                                   NULL-terminated in forward direction */
    struct thread_queue *bqp;   /* Pointer to list variable in kernel
                                   object where thread is blocked - used
                                   for implicit unblock and explicit wake */
#if CONFIG_CORELOCK == SW_CORELOCK
    struct thread_entry **bqnlp; /* Pointer to list variable in kernel
                                    object where thread is blocked - non-locked
                                    operations will be used */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    intptr_t retval;            /* Return value from a blocked operation */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    long last_run;              /* Last tick when started */
#endif
    unsigned short stack_size;  /* Size of stack in bytes */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char priority;     /* Current priority */
    unsigned char priority_x;   /* Inherited priority - right now just a
                                   runtime guarantee flag */
#endif
    unsigned char state;        /* Thread slot state (STATE_*) */
#if NUM_CORES > 1
    unsigned char core;         /* The core to which thread belongs */
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char boosted;      /* CPU frequency boost flag */
#endif
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock cl;         /* Corelock to lock thread slot */
#endif
};
#if NUM_CORES > 1
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
#define TBOP_UNLOCK_LIST     0x01 /* Set a pointer variable address var_ptrp */
#if CONFIG_CORELOCK == CORELOCK_SWAP
#define TBOP_SET_VARi        0x02 /* Set an int at address var_ip */
#define TBOP_SET_VARu8       0x03 /* Set an unsigned char at address var_u8p */
#define TBOP_VAR_TYPE_MASK   0x03 /* Mask for variable type */
#endif /* CONFIG_CORELOCK */
#define TBOP_UNLOCK_CORELOCK 0x04
#define TBOP_UNLOCK_THREAD   0x08 /* Unlock a thread's slot */
#define TBOP_UNLOCK_CURRENT  0x10 /* Unlock the current thread's slot */
#define TBOP_IRQ_LEVEL       0x20 /* Set a new irq level */
#define TBOP_SWITCH_CORE     0x40 /* Call the core switch preparation routine */

struct thread_blk_ops
{
    int irq_level;                   /* new IRQ level to set */
#if CONFIG_CORELOCK != SW_CORELOCK
    union
    {
        int var_iv;                  /* int variable value to set */
        uint8_t var_u8v;             /* unsigned char value to set */
        struct thread_entry *list_v; /* list pointer queue value to set */
    };
#endif
    union
    {
#if CONFIG_CORELOCK != SW_CORELOCK
        int *var_ip;                 /* pointer to int variable */
        uint8_t *var_u8p;            /* pointer to unsigned char variable */
#endif
        struct thread_queue *list_p; /* pointer to list variable */
    };
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock *cl_p;           /* corelock to unlock */
    struct thread_entry *thread;     /* thread to unlock */
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    unsigned char state;             /* new thread state (performs unlock) */
#endif /* CONFIG_CORELOCK */
    unsigned char flags;             /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - the core is constantly active on these; they are
       never locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    /* "Shared" lists - cores interact in a synchronized manner - access
       is locked between cores and interrupts */
    struct thread_queue waking;    /* intermediate locked list that holds
                                      threads the other core should wake up
                                      on the next task switch */
    long next_tmo_check;           /* soonest time to check tmo threads */
#if NUM_CORES > 1
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
#else
#define STAY_IRQ_LEVEL (-1)
    int irq_level;                 /* sets the irq level to irq_level */
#endif /* NUM_CORES */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char highest_priority;
#endif
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
#else
#define IF_PRIO(...)
#endif
/* Macros generate better code than an inline function in this case */
#if (defined (CPU_PP) || defined (CPU_ARM))
/* atomic */
#if CONFIG_CORELOCK == SW_CORELOCK
#define test_and_set(a, v, cl) \
    xchg8((a), (v), (cl))
/* atomic */
#define xchg8(a, v, cl) \
({ uint32_t o;            \
   corelock_lock(cl);     \
   o = *(uint8_t *)(a);   \
   *(uint8_t *)(a) = (v); \
   corelock_unlock(cl);   \
   o; })
#define xchg32(a, v, cl) \
({ uint32_t o;             \
   corelock_lock(cl);      \
   o = *(uint32_t *)(a);   \
   *(uint32_t *)(a) = (v); \
   corelock_unlock(cl);    \
   o; })
#define xchgptr(a, v, cl) \
({ typeof (*(a)) o;     \
   corelock_lock(cl);   \
   o = *(a);            \
   *(a) = (v);          \
   corelock_unlock(cl); \
   o; })
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* atomic */
#define test_and_set(a, v, ...) \
    xchg8((a), (v))
#define xchg8(a, v, ...) \
({ uint32_t o;                \
   asm volatile(              \
       "swpb %0, %1, [%2]"    \
       : "=&r"(o)             \
       : "r"(v),              \
         "r"((uint8_t*)(a))); \
   o; })
/* atomic */
#define xchg32(a, v, ...) \
({ uint32_t o;                 \
   asm volatile(               \
       "swp %0, %1, [%2]"      \
       : "=&r"(o)              \
       : "r"((uint32_t)(v)),   \
         "r"((uint32_t*)(a))); \
   o; })
/* atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o;       \
   asm volatile(          \
       "swp %0, %1, [%2]" \
       : "=&r"(o)         \
       : "r"(v), "r"(a)); \
   o; })
#endif /* locking selection */
#elif defined (CPU_COLDFIRE)
/* atomic */
/* one branch will be optimized away if v is a constant expression */
#define test_and_set(a, v, ...) \
({ uint32_t o = 0;                \
   if (v) {                       \
       asm volatile (             \
           "bset.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   } else {                       \
       asm volatile (             \
           "bclr.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   }                              \
   asm volatile ("sne.b %0"       \
       : "+d"(o));                \
   o; })
#elif CONFIG_CPU == SH7034
/* atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o;                 \
   asm volatile (              \
       "tas.b @%2  \n"         \
       "mov #-1, %0\n"         \
       "negc %0, %0\n"         \
       : "=r"(o)               \
       : "M"((uint32_t)(v)),   /* Value of v must be 1 */ \
         "r"((uint8_t *)(a))); \
   o; })
#endif /* CONFIG_CPU == */
/* defaults for no asm version */
#ifndef test_and_set
/* not atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* test_and_set */
#ifndef xchg8
/* not atomic */
#define xchg8(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* xchg8 */
#ifndef xchg32
/* not atomic */
#define xchg32(a, v, ...) \
({ uint32_t o = *(uint32_t *)(a); \
   *(uint32_t *)(a) = (v);        \
   o; })
#endif /* xchg32 */
#ifndef xchgptr
/* not atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o = *(a); \
   *(a) = (v);             \
   o; })
#endif /* xchgptr */
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  unsigned flags, const char *name
                  IF_PRIO(, int priority)
                  IF_COP(, unsigned int core));

#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost()
#define cancel_cpu_boost()
#endif
void thread_thaw(struct thread_entry *thread);
void thread_wait(struct thread_entry *thread);
void remove_thread(struct thread_entry *thread);
void switch_thread(struct thread_entry *old);
void sleep_thread(int ticks);
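
/* Usage sketch (illustrative only, not part of this header): creating a
 * thread frozen, finishing shared-state setup, then letting it run with
 * thread_thaw(). demo_stack and demo_thread are hypothetical example names;
 * HZ and the CPU core constant come from other Rockbox headers.
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *     static void demo_thread(void)
 *     {
 *         for (;;)
 *             sleep_thread(HZ);   // wake roughly once per second
 *     }
 *
 *     void demo_init(void)
 *     {
 *         struct thread_entry *t =
 *             create_thread(demo_thread, demo_stack, sizeof(demo_stack),
 *                           CREATE_THREAD_FROZEN, "demo"
 *                           IF_PRIO(, PRIORITY_BACKGROUND)
 *                           IF_COP(, CPU));
 *         // ... initialize state the thread depends on ...
 *         thread_thaw(t);   // the thread only starts running after this
 *     }
 */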
/*
 * Setup to allow using thread queues as locked or non-locked without speed
 * sacrifices in both core locking types.
 *
 * The blocking/waking functions inline two different versions of the real
 * function into the stubs when a software or other separate core locking
 * mechanism is employed.
 *
 * When a simple test-and-set or similar instruction is available, locking
 * has no cost and so only one version is used and the internal worker is
 * called directly.
 *
 * CORELOCK_NONE is treated the same as when an atomic instruction can be
 * used.
 */

/* Blocks the current thread on a thread queue */
#if CONFIG_CORELOCK == SW_CORELOCK
void block_thread(struct thread_queue *tq);
void block_thread_no_listlock(struct thread_entry **list);
#else
void _block_thread(struct thread_queue *tq);
static inline void block_thread(struct thread_queue *tq)
    { _block_thread(tq); }
static inline void block_thread_no_listlock(struct thread_entry **list)
    { _block_thread((struct thread_queue *)list); }
#endif /* CONFIG_CORELOCK */
/* Blocks the current thread on a thread queue for a max amount of time.
 * There is no "_no_listlock" version because timeout blocking without sync on
 * the blocking queues is not permitted since either core could access the
 * list at any time to do an implicit wake. In other words, objects with
 * timeout support require lockable queues. */
void block_thread_w_tmo(struct thread_queue *tq, int timeout);

/* Wakes up the thread at the head of the queue */
#define THREAD_WAKEUP_NONE    ((struct thread_entry *)NULL)
#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
#if CONFIG_CORELOCK == SW_CORELOCK
struct thread_entry * wakeup_thread(struct thread_queue *tq);
struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
#else
struct thread_entry * _wakeup_thread(struct thread_queue *list);
static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
    { return _wakeup_thread(tq); }
static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
    { return _wakeup_thread((struct thread_queue *)list); }
#endif /* CONFIG_CORELOCK */

/* Initialize a thread_queue object. */
static inline void thread_queue_init(struct thread_queue *tq)
    { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
/* A convenience function for waking an entire queue of threads. */
static inline void thread_queue_wake(struct thread_queue *tq)
    { while (wakeup_thread(tq) != NULL); }
/* The no-listlock version of thread_queue_wake() */
static inline void thread_queue_wake_no_listlock(struct thread_entry **list)
    { while (wakeup_thread_no_listlock(list) != NULL); }
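
/* Usage sketch (illustrative only, not part of this header): a minimal
 * wait/signal pair built on a thread queue. demo_queue, demo_flag,
 * demo_wait and demo_signal are hypothetical names; a real kernel object
 * would also guard the flag check and the block against interrupts and the
 * other core - this only shows the call pattern.
 *
 *     static struct thread_queue demo_queue;   // thread_queue_init() once
 *     static volatile bool demo_flag = false;
 *
 *     void demo_wait(void)
 *     {
 *         while (!demo_flag)
 *             block_thread(&demo_queue);       // sleep until woken
 *     }
 *
 *     void demo_signal(void)
 *     {
 *         demo_flag = true;
 *         wakeup_thread(&demo_queue);          // wake the head of the queue
 *     }
 */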
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);
/* Yield that guarantees thread execution once per round regardless of
   thread's scheduler priority - basically a transient realtime boost
   without altering the scheduler's thread precedence. */
void priority_yield(void);
#else
#define priority_yield yield
#endif /* HAVE_PRIORITY_SCHEDULING */
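
/* Usage sketch (illustrative only, not part of this header): a long-running
 * buffering loop that wants one guaranteed slice per scheduler round without
 * permanently raising its priority. more_to_fill() and fill_some_data() are
 * hypothetical placeholders.
 *
 *     while (more_to_fill())
 *     {
 *         fill_some_data();
 *         priority_yield();   // falls back to plain yield() without
 *                             // HAVE_PRIORITY_SCHEDULING
 *     }
 */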
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
struct thread_entry * thread_get_current(void);
void init_threads(void);
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */