firmware/export/thread.h
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/  \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/     \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high priority threads more CPU time than lower priority threads
 * when they need it.
 *
 * If the software playback codec's PCM buffer is running critically low, the
 * codec can raise its own priority to REALTIME to override the user interface
 * and prevent playback skipping.
 */
#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
#define LOWEST_PRIORITY        100   /* The lowest possible thread priority */
#define PRIORITY_REALTIME        1
#define PRIORITY_USER_INTERFACE  4   /* The main thread */
#define PRIORITY_RECORDING       4   /* Recording thread */
#define PRIORITY_PLAYBACK        4   /* or REALTIME when needed */
#define PRIORITY_BUFFERING       4   /* Codec buffering thread */
#define PRIORITY_SYSTEM          6   /* All other firmware threads */
#define PRIORITY_BACKGROUND      8   /* Normal application threads */
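/* Illustrative sketch (not part of this header's declarations): how a
 * playback codec thread might temporarily boost itself to the realtime
 * priority and later drop back, using thread_get_current() and
 * thread_set_priority() declared further down. The function name and the
 * "critical" flag are hypothetical.
 *
 *   void codec_pcm_buffer_level_changed(bool critical)
 *   {
 *       struct thread_entry *self = thread_get_current();
 *       if (critical)
 *           thread_set_priority(self, PRIORITY_REALTIME);
 *       else
 *           thread_set_priority(self, PRIORITY_PLAYBACK);
 *   }
 */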
/* TODO: Only a minor tweak to create_thread would be needed to let
 * thread slots be caller allocated - no essential threading functionality
 * depends upon an array */
#if CONFIG_CODEC == SWCODEC

#ifdef HAVE_RECORDING
#define MAXTHREADS  18
#else
#define MAXTHREADS  17
#endif

#else
#define MAXTHREADS  11
#endif /* CONFIG_CODEC == * */

#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
/**
 * "Busy" values that can be swapped into a variable to indicate
 * that the variable or object pointed to is in use by another processor
 * core. When accessed, the busy value is swapped-in while the current
 * value is atomically returned. If the swap returns the busy value,
 * the processor should retry the operation until some other value is
 * returned. When modification is finished, the new value should be
 * written which unlocks it and updates it atomically.
 *
 * Procedure:
 * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
 *
 * Modify/examine object at mem location or variable. Create "new_value"
 * as suitable.
 *
 * variable = new_value or curr_value;
 *
 * To check a value for busy and perform an operation if not:
 * curr_value = swap(&variable, BUSY_VALUE);
 *
 * if (curr_value != BUSY_VALUE)
 * {
 *     Modify/examine object at mem location or variable. Create "new_value"
 *     as suitable.
 *     variable = new_value or curr_value;
 * }
 * else
 * {
 *     Do nothing - already busy
 * }
 *
 * Only ever restore when an actual value is returned or else it could leave
 * the variable locked permanently if another processor unlocked in the
 * meantime. The next access attempt would deadlock for all processors since
 * an abandoned busy status would be left behind.
 */
#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
#define STATE_BUSYu8   UINT8_MAX
#define STATE_BUSYi    INT_MIN
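/* Illustrative sketch (assumptions: a pointer variable shared between cores
 * and the xchgptr() swap defined later in this header). It follows the
 * "Procedure" above: spin while the busy marker comes back, modify, then
 * store a real value to unlock. The variable name is hypothetical.
 *
 *   static struct thread_entry *shared_list;
 *
 *   struct thread_entry *t;
 *   while ((t = xchgptr(&shared_list, STATE_BUSYuptr)) == STATE_BUSYuptr)
 *       ;                    // another core holds it - retry
 *   // examine or relink "t" here, producing the value to publish
 *   shared_list = t;         // writing a non-busy value unlocks it
 */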
#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
 * needs them. */
#ifdef CPU_COLDFIRE
struct regs
{
    unsigned int macsr;  /*     0 - EMAC status register */
    unsigned int d[6];   /*  4-24 - d2-d7 */
    unsigned int a[5];   /* 28-44 - a2-a6 */
    void         *sp;    /*    48 - Stack pointer (a7) */
    void         *start; /*    52 - Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
    unsigned int r[7];   /*  0-24 - Registers r8 thru r14 */
    void         *sp;    /*    28 - Stack pointer (r15) */
    void         *pr;    /*    32 - Procedure register */
    void         *start; /*    36 - Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
    unsigned int r[8];   /*  0-28 - Registers r4-r11 */
    void         *sp;    /*    32 - Stack pointer (r13) */
    unsigned int lr;     /*    36 - r14 (lr) */
    void         *start; /*    40 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
#else
struct regs
{
    void *t;             /* Simulator OS thread */
    void *c;             /* Condition for blocking and sync */
    void (*start)(void); /* Start function */
};
#endif /* !SIMULATOR */
/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily the kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
#if NUM_CORES > 1
    STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
#endif
};
#if NUM_CORES > 1
#define THREAD_DESTRUCT ((const char *)0x84905617)
#endif
/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};
/* Small objects for core-wise mutual exclusion */
#if CONFIG_CORELOCK == SW_CORELOCK
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* Use native atomic swap/exchange instruction */
struct corelock
{
    unsigned char locked;
} __attribute__((packed));

#define corelock_init(cl) \
    ({ (cl)->locked = 0; })
#define corelock_lock(cl) \
    ({ while (test_and_set(&(cl)->locked, 1)); })
#define corelock_try_lock(cl) \
    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
#define corelock_unlock(cl) \
    ({ (cl)->locked = 0; })
#else
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* core locking selection */
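/* Illustrative sketch (assumption: NUM_CORES > 1 and a corelock embedded in
 * the shared object, the same pattern thread_queue uses below). The object
 * and function names are hypothetical; only the corelock_*() operations come
 * from this header, and corelock_init() must run once before first use.
 *
 *   struct shared_counter
 *   {
 *       int count;
 *       struct corelock cl;
 *   };
 *
 *   void shared_counter_add(struct shared_counter *c, int n)
 *   {
 *       corelock_lock(&c->cl);   // spins until the other core releases it
 *       c->count += n;
 *       corelock_unlock(&c->cl);
 *   }
 */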
struct thread_queue
{
    struct thread_entry *queue; /* list of threads waiting -
                                   _must_ be first member */
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock cl;         /* lock for atomic list operations */
#endif
};
/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;        /* Register context at switch -
                                   _must_ be first member */
    void *stack;                /* Pointer to top of stack */
    const char *name;           /* Thread name */
    long tmo_tick;              /* Tick when thread should be woken from
                                   timeout */
    struct thread_list l;       /* Links for blocked/waking/running -
                                   circular linkage in both directions */
    struct thread_list tmo;     /* Links for timeout list -
                                   Self-pointer-terminated in reverse direction,
                                   NULL-terminated in forward direction */
    struct thread_queue *bqp;   /* Pointer to list variable in kernel
                                   object where thread is blocked - used
                                   for implicit unblock and explicit wake */
#if CONFIG_CORELOCK == SW_CORELOCK
    struct thread_entry **bqnlp; /* Pointer to list variable in kernel
                                    object where thread is blocked - non-locked
                                    operations will be used */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    intptr_t retval;            /* Return value from a blocked operation */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    long last_run;              /* Last tick when started */
#endif
    unsigned short stack_size;  /* Size of stack in bytes */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char priority;     /* Current priority */
    unsigned char priority_x;   /* Inherited priority - right now just a
                                   runtime guarantee flag */
#endif
    unsigned char state;        /* Thread slot state (STATE_*) */
#if NUM_CORES > 1
    unsigned char core;         /* The core to which thread belongs */
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char boosted;      /* CPU frequency boost flag */
#endif
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock cl;         /* Corelock to lock thread slot */
#endif
};
#if NUM_CORES > 1
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
#define TBOP_UNLOCK_LIST       0x01 /* Set a pointer variable address var_ptrp */
#if CONFIG_CORELOCK == CORELOCK_SWAP
#define TBOP_SET_VARi          0x02 /* Set an int at address var_ip */
#define TBOP_SET_VARu8         0x03 /* Set an unsigned char at address var_u8p */
#define TBOP_VAR_TYPE_MASK     0x03 /* Mask for variable type */
#endif /* CONFIG_CORELOCK */
#define TBOP_UNLOCK_CORELOCK   0x04
#define TBOP_UNLOCK_THREAD     0x08 /* Unlock a thread's slot */
#define TBOP_UNLOCK_CURRENT    0x10 /* Unlock the current thread's slot */
#define TBOP_SWITCH_CORE       0x20 /* Call the core switch preparation routine */

struct thread_blk_ops
{
#if CONFIG_CORELOCK != SW_CORELOCK
    union
    {
        int var_iv;                  /* int variable value to set */
        uint8_t var_u8v;             /* unsigned char value to set */
        struct thread_entry *list_v; /* list pointer queue value to set */
    };
#endif
    union
    {
#if CONFIG_CORELOCK != SW_CORELOCK
        int *var_ip;                 /* pointer to int variable */
        uint8_t *var_u8p;            /* pointer to unsigned char variable */
#endif
        struct thread_queue *list_p; /* pointer to list variable */
    };
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock *cl_p;           /* corelock to unlock */
    struct thread_entry *thread;     /* thread to unlock */
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    unsigned char state;             /* new thread state (performs unlock) */
#endif /* CONFIG_CORELOCK */
    unsigned char flags;             /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - core is constantly active on these and they are never
       locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    /* "Shared" lists - cores interact in a synchronized manner - access
       is locked between cores and interrupts */
    struct thread_queue waking;    /* intermediate locked list that
                                      holds threads the other core should
                                      wake up on the next task switch */
    long next_tmo_check;           /* soonest time to check tmo threads */
#if NUM_CORES > 1
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
#endif /* NUM_CORES */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char highest_priority;
#endif
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
#else
#define IF_PRIO(...)
#endif
/* Macros generate better code than an inline function in this case */
#if (defined (CPU_PP) || defined (CPU_ARM))
/* atomic */
#if CONFIG_CORELOCK == SW_CORELOCK
#define test_and_set(a, v, cl) \
    xchg8((a), (v), (cl))
/* atomic */
#define xchg8(a, v, cl) \
({ uint32_t o;            \
   corelock_lock(cl);     \
   o = *(uint8_t *)(a);   \
   *(uint8_t *)(a) = (v); \
   corelock_unlock(cl);   \
   o; })
#define xchg32(a, v, cl) \
({ uint32_t o;             \
   corelock_lock(cl);      \
   o = *(uint32_t *)(a);   \
   *(uint32_t *)(a) = (v); \
   corelock_unlock(cl);    \
   o; })
#define xchgptr(a, v, cl) \
({ typeof (*(a)) o;     \
   corelock_lock(cl);   \
   o = *(a);            \
   *(a) = (v);          \
   corelock_unlock(cl); \
   o; })
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* atomic */
#define test_and_set(a, v, ...) \
    xchg8((a), (v))
#define xchg8(a, v, ...) \
({ uint32_t o;                \
   asm volatile(              \
       "swpb %0, %1, [%2]"    \
       : "=&r"(o)             \
       : "r"(v),              \
         "r"((uint8_t*)(a))); \
   o; })
/* atomic */
#define xchg32(a, v, ...) \
({ uint32_t o;                 \
   asm volatile(               \
       "swp %0, %1, [%2]"      \
       : "=&r"(o)              \
       : "r"((uint32_t)(v)),   \
         "r"((uint32_t*)(a))); \
   o; })
/* atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o;       \
   asm volatile(          \
       "swp %0, %1, [%2]" \
       : "=&r"(o)         \
       : "r"(v), "r"(a)); \
   o; })
#endif /* locking selection */
#elif defined (CPU_COLDFIRE)
/* atomic */
/* one branch will be optimized away if v is a constant expression */
#define test_and_set(a, v, ...) \
({ uint32_t o = 0;                \
   if (v) {                       \
       asm volatile (             \
           "bset.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   } else {                       \
       asm volatile (             \
           "bclr.b #0, (%0)"      \
           : : "a"((uint8_t*)(a)) \
           : "cc");               \
   }                              \
   asm volatile ("sne.b %0"       \
                 : "+d"(o));      \
   o; })
#elif CONFIG_CPU == SH7034
/* atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o;              \
   asm volatile (           \
       "tas.b   @%2     \n" \
       "mov     #-1, %0 \n" \
       "negc    %0, %0  \n" \
       : "=r"(o)            \
       : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
         "r"((uint8_t *)(a))); \
   o; })
#endif /* CONFIG_CPU == */
/* defaults for no asm version */
#ifndef test_and_set
/* not atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* test_and_set */
#ifndef xchg8
/* not atomic */
#define xchg8(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v);        \
   o; })
#endif /* xchg8 */
#ifndef xchg32
/* not atomic */
#define xchg32(a, v, ...) \
({ uint32_t o = *(uint32_t *)(a); \
   *(uint32_t *)(a) = (v);        \
   o; })
#endif /* xchg32 */
#ifndef xchgptr
/* not atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o = *(a); \
   *(a) = (v);             \
   o; })
#endif /* xchgptr */
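/* Illustrative sketch (assumption: a single-byte flag used as a spinlock,
 * which is how corelock_lock() above uses test_and_set() when
 * CONFIG_CORELOCK == CORELOCK_SWAP). The flag name is hypothetical, and the
 * extra corelock argument is only needed for the SW_CORELOCK variant.
 *
 *   static volatile unsigned char flag = 0;
 *
 *   // acquire: spin until the previous value was 0 (we set it to 1)
 *   while (test_and_set(&flag, 1))
 *       ;
 *   // ... critical section ...
 *   flag = 0; // release
 */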
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  unsigned flags, const char *name
                  IF_PRIO(, int priority)
                  IF_COP(, unsigned int core));

#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost()
#define cancel_cpu_boost()
#endif
void thread_thaw(struct thread_entry *thread);
void thread_wait(struct thread_entry *thread);
void remove_thread(struct thread_entry *thread);
void switch_thread(struct thread_entry *old);
void sleep_thread(int ticks);
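/* Illustrative sketch (not part of this header): creating a frozen thread
 * and releasing it later with thread_thaw(). The stack array, thread
 * function, name and target core are hypothetical; IF_PRIO/IF_COP expand to
 * nothing on builds without priority scheduling or a second core, and the
 * CPU core identifier comes from the config headers, not from this file.
 *
 *   static long my_stack[DEFAULT_STACK_SIZE/sizeof(long)];
 *
 *   static void my_thread(void)
 *   {
 *       for (;;)
 *           sleep_thread(1);
 *   }
 *
 *   struct thread_entry *t =
 *       create_thread(my_thread, my_stack, sizeof(my_stack),
 *                     CREATE_THREAD_FROZEN, "my thread"
 *                     IF_PRIO(, PRIORITY_BACKGROUND)
 *                     IF_COP(, CPU));
 *   thread_thaw(t); // thread does not run until thawed
 */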
/*
 * Setup to allow using thread queues as locked or non-locked without speed
 * sacrifices in both core locking types.
 *
 * The blocking/waking functions inline two different versions of the real
 * function into the stubs when a software or other separate core locking
 * mechanism is employed.
 *
 * When a simple test-and-set or similar instruction is available, locking
 * has no cost and so one version is used and the internal worker is called
 * directly.
 *
 * CORELOCK_NONE is treated the same as when an atomic instruction can be
 * used.
 */
/* Blocks the current thread on a thread queue */
#if CONFIG_CORELOCK == SW_CORELOCK
void block_thread(struct thread_queue *tq);
void block_thread_no_listlock(struct thread_entry **list);
#else
void _block_thread(struct thread_queue *tq);
static inline void block_thread(struct thread_queue *tq)
    { _block_thread(tq); }
static inline void block_thread_no_listlock(struct thread_entry **list)
    { _block_thread((struct thread_queue *)list); }
#endif /* CONFIG_CORELOCK */

/* Blocks the current thread on a thread queue for a max amount of time.
 * There is no "_no_listlock" version because timeout blocks without sync on
 * the blocking queues are not permitted since either core could access the
 * list at any time to do an implicit wake. In other words, objects with
 * timeout support require lockable queues. */
void block_thread_w_tmo(struct thread_queue *tq, int timeout);

/* Wakes up the thread at the head of the queue */
#define THREAD_WAKEUP_NONE    ((struct thread_entry *)NULL)
#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
#if CONFIG_CORELOCK == SW_CORELOCK
struct thread_entry * wakeup_thread(struct thread_queue *tq);
struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
#else
struct thread_entry * _wakeup_thread(struct thread_queue *list);
static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
    { return _wakeup_thread(tq); }
static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
    { return _wakeup_thread((struct thread_queue *)list); }
#endif /* CONFIG_CORELOCK */

/* Initialize a thread_queue object. */
static inline void thread_queue_init(struct thread_queue *tq)
    { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
/* A convenience function for waking an entire queue of threads. */
static inline void thread_queue_wake(struct thread_queue *tq)
    { while (wakeup_thread(tq) != NULL); }
/* The no-listlock version of thread_queue_wake() */
static inline void thread_queue_wake_no_listlock(struct thread_entry **list)
    { while (wakeup_thread_no_listlock(list) != NULL); }
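/* Illustrative sketch (assumption: a kernel-style object that parks threads
 * on a thread_queue until an event arrives, similar in spirit to how the
 * kernel's event queues use these primitives). The object and function
 * names are hypothetical; only the thread_queue operations come from this
 * header.
 *
 *   struct event_flag
 *   {
 *       volatile bool set;
 *       struct thread_queue waiters;
 *   };
 *
 *   void event_flag_init(struct event_flag *e)
 *   {
 *       e->set = false;
 *       thread_queue_init(&e->waiters);
 *   }
 *
 *   void event_flag_wait(struct event_flag *e)
 *   {
 *       while (!e->set)
 *           block_thread(&e->waiters);  // woken by event_flag_set()
 *   }
 *
 *   void event_flag_set(struct event_flag *e)
 *   {
 *       e->set = true;
 *       thread_queue_wake(&e->waiters); // wake every waiter
 *   }
 */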
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);
/* Yield that guarantees thread execution once per round regardless of
   thread's scheduler priority - basically a transient realtime boost
   without altering the scheduler's thread precedence. */
void priority_yield(void);
#else
#define priority_yield  yield
#endif /* HAVE_PRIORITY_SCHEDULING */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
struct thread_entry * thread_get_current(void);
void init_threads(void);
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */