1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Ulf Ralberg
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
29 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
30 * by giving high priority threads more CPU time than lower priority threads
31 * when they need it. Priority is differential such that the priority
32 * difference between a lower priority runnable thread and the highest priority
33 * runnable thread determines the amount of aging necessary for the lower
34 * priority thread to be scheduled in order to prevent starvation.
36 * If software playback codec pcm buffer is going down to critical, codec
37 * can gradually raise its own priority to override user interface and
38 * prevent playback skipping.
/* Thread priority values: numerically LOWER value = HIGHER priority.
 * Usable range is HIGHEST_PRIORITY..LOWEST_PRIORITY; 0 and 32 are reserved. */
#define PRIORITY_RESERVED_HIGH   0   /* Reserved */
#define PRIORITY_RESERVED_LOW    32  /* Reserved */
#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
#define LOWEST_PRIORITY          31  /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1      1
#define PRIORITY_REALTIME_2      2
#define PRIORITY_REALTIME_3      3
#define PRIORITY_REALTIME_4      4
#define PRIORITY_REALTIME        4   /* Lowest realtime range */
#define PRIORITY_BUFFERING       15  /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE  16  /* The main thread */
#define PRIORITY_RECORDING       16  /* Recording thread */
#define PRIORITY_PLAYBACK        16  /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5   /* Maximum allowable playback priority */
#define PRIORITY_SYSTEM          18  /* All other firmware threads */
#define PRIORITY_BACKGROUND      20  /* Normal application threads */
#define NUM_PRIORITIES           32  /* Count of distinct priority levels */
#define PRIORITY_IDLE            32  /* Priority representative of no tasks */

/* I/O priorities: lower value = more urgent I/O */
#define IO_PRIORITY_IMMEDIATE    0
#define IO_PRIORITY_BACKGROUND   32
64 #if CONFIG_CODEC == SWCODEC
67 #define BASETHREADS 17
69 #define BASETHREADS 16
73 #define BASETHREADS 11
#endif /* CONFIG_CODEC == * */
76 #ifndef TARGET_EXTRA_THREADS
77 #define TARGET_EXTRA_THREADS 0
/* Total thread slots available: base firmware threads plus any extras a
 * specific target declares via TARGET_EXTRA_THREADS */
#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)

#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
85 /* Need to keep structures inside the header file because debug_menu
90 uint32_t macsr
; /* 0 - EMAC status register */
91 uint32_t d
[6]; /* 4-24 - d2-d7 */
92 uint32_t a
[5]; /* 28-44 - a2-a6 */
93 uint32_t sp
; /* 48 - Stack pointer (a7) */
94 uint32_t start
; /* 52 - Thread start address, or NULL when started */
96 #elif CONFIG_CPU == SH7034
99 uint32_t r
[7]; /* 0-24 - Registers r8 thru r14 */
100 uint32_t sp
; /* 28 - Stack pointer (r15) */
101 uint32_t pr
; /* 32 - Procedure register */
102 uint32_t start
; /* 36 - Thread start address, or NULL when started */
104 #elif defined(CPU_ARM)
107 uint32_t r
[8]; /* 0-28 - Registers r4-r11 */
108 uint32_t sp
; /* 32 - Stack pointer (r13) */
109 uint32_t lr
; /* 36 - r14 (lr) */
110 uint32_t start
; /* 40 - Thread start address, or NULL when started */
112 #elif defined(CPU_MIPS)
115 uint32_t r
[9]; /* 0-32 - Registers s0-s7, fp */
116 uint32_t sp
; /* 36 - Stack pointer */
117 uint32_t ra
; /* 40 - Return address */
118 uint32_t start
; /* 44 - Thread start address, or NULL when started */
120 #endif /* CONFIG_CPU */
124 void *t
; /* Simulator OS thread */
125 void *told
; /* Last thread in slot (explained in thread-sdl.c) */
126 void *s
; /* Semaphore for blocking and wakeup */
127 void (*start
)(void); /* Start function */
129 #endif /* !SIMULATOR */
131 /* NOTE: The use of the word "queue" may also refer to a linked list of
132 threads being maintained that are normally dealt with in FIFO order
133 and not necessarily kernel event_queue */
136 /* States without a timeout must be first */
137 STATE_KILLED
= 0, /* Thread is killed (default) */
138 STATE_RUNNING
, /* Thread is currently running */
139 STATE_BLOCKED
, /* Thread is indefinitely blocked on a queue */
140 /* These states involve adding the thread to the tmo list */
141 STATE_SLEEPING
, /* Thread is sleeping with a timeout */
142 STATE_BLOCKED_W_TMO
, /* Thread is blocked on a queue with a timeout */
143 /* Miscellaneous states */
144 STATE_FROZEN
, /* Thread is suspended and will not run until
145 thread_thaw is called with its ID */
147 TIMEOUT_STATE_FIRST
= STATE_SLEEPING
,
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. (All-ones pointer cannot collide with a
 * real name string address.) */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
/* Link information for lists thread is in */
struct thread_entry; /* forward declaration -- needed by the list links */
161 struct thread_entry
*prev
; /* Previous thread in a list */
162 struct thread_entry
*next
; /* Next thread in a list */
165 /* Small objects for core-wise mutual exclusion */
166 #if CONFIG_CORELOCK == SW_CORELOCK
167 /* No reliable atomic instruction available - use Peterson's algorithm */
170 volatile unsigned char myl
[NUM_CORES
];
171 volatile unsigned char turn
;
172 } __attribute__((packed
));
/* Software corelock API (Peterson's algorithm variant -- see comment above).
 * Semantics are expected to mirror the macro variants in the swap branch
 * below; confirm against the implementation in the corresponding .c file. */
void corelock_init(struct corelock *cl);      /* set lock to unlocked state */
void corelock_lock(struct corelock *cl);      /* spin until lock acquired */
int corelock_try_lock(struct corelock *cl);   /* single attempt; presumably
                                                 nonzero on success -- TODO
                                                 confirm in implementation */
void corelock_unlock(struct corelock *cl);    /* release the lock */
178 #elif CONFIG_CORELOCK == CORELOCK_SWAP
179 /* Use native atomic swap/exchange instruction */
182 volatile unsigned char locked
;
183 } __attribute__((packed
));
/* Corelock via atomic exchange: the lock is one byte; test_and_set swaps in
 * a 1 and yields the byte's previous value (0 meant it was free). These use
 * GCC statement expressions. */
#define corelock_init(cl) \
    ({ (cl)->locked = 0; })
#define corelock_lock(cl) \
    ({ while (test_and_set(&(cl)->locked, 1)); })
#define corelock_try_lock(cl) \
    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
#define corelock_unlock(cl) \
    ({ (cl)->locked = 0; })
/* No atomic corelock op needed or just none defined */
/* Single-core or lock-free configuration: every corelock operation
 * compiles away to nothing */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* core locking selection */
201 #ifdef HAVE_PRIORITY_SCHEDULING
204 struct thread_entry
*thread
; /* thread blocking other threads
205 (aka. object owner) */
206 int priority
; /* highest priority waiter */
207 struct thread_entry
* (*wakeup_protocol
)(struct thread_entry
*thread
);
/* Choices of wakeup protocol (assigned to blocker.wakeup_protocol).
 * Both take the thread being woken and presumably return the next thread to
 * wake or NULL -- confirm return semantics in the scheduler implementation. */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);
223 struct priority_distribution
225 uint8_t hist
[NUM_PRIORITIES
]; /* Histogram: Frequency for each priority */
226 uint32_t mask
; /* Bitmask of hist entries that are not zero */
229 #endif /* HAVE_PRIORITY_SCHEDULING */
231 /* Information kept in each thread slot
232 * members are arranged according to size - largest first - in order
233 * to ensure both alignment and packing at the same time.
237 struct regs context
; /* Register context at switch -
238 _must_ be first member */
239 uintptr_t *stack
; /* Pointer to top of stack */
240 const char *name
; /* Thread name */
241 long tmo_tick
; /* Tick when thread should be woken from
243 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
244 struct thread_list l
; /* Links for blocked/waking/running -
245 circular linkage in both directions */
246 struct thread_list tmo
; /* Links for timeout list -
247 Circular in reverse direction, NULL-terminated in
249 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
250 struct thread_entry
**bqp
; /* Pointer to list variable in kernel
251 object where thread is blocked - used
252 for implicit unblock and explicit wake
253 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
255 struct corelock
*obj_cl
; /* Object corelock where thread is blocked -
256 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
257 struct corelock waiter_cl
; /* Corelock for thread_wait */
258 struct corelock slot_cl
; /* Corelock to lock thread slot */
259 unsigned char core
; /* The core to which thread belongs */
261 struct thread_entry
*queue
; /* List of threads waiting for thread to be
263 #ifdef HAVE_WAKEUP_EXT_CB
264 void (*wakeup_ext_cb
)(struct thread_entry
*thread
); /* Callback that
265 performs special steps needed when being
266 forced off of an object's wait queue that
267 go beyond the standard wait queue removal
268 and priority disinheritance */
269 /* Only enabled when using queue_send for now */
271 #if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
272 intptr_t retval
; /* Return value from a blocked operation/
275 #ifdef HAVE_PRIORITY_SCHEDULING
276 /* Priority summary of owned objects that support inheritance */
277 struct blocker
*blocker
; /* Pointer to blocker when this thread is blocked
278 on an object that supports PIP -
279 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
280 struct priority_distribution pdist
; /* Priority summary of owned objects
281 that have blocked threads and thread's own
283 int skip_count
; /* Number of times skipped if higher priority
284 thread was running */
285 unsigned char base_priority
; /* Base priority (set explicitly during
286 creation or thread_set_priority) */
287 unsigned char priority
; /* Scheduled priority (higher of base or
288 all threads blocked by this one) */
290 uint16_t id
; /* Current slot id */
291 unsigned short stack_size
; /* Size of stack in bytes */
292 unsigned char state
; /* Thread slot state (STATE_*) */
293 #ifdef HAVE_SCHEDULER_BOOSTCTRL
294 unsigned char cpu_boost
; /* CPU frequency boost flag */
296 #ifdef HAVE_IO_PRIORITY
297 unsigned char io_priority
;
/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS|
 * V = version byte, S = slot index byte. The version presumably changes when
 * a slot is reused so stale IDs are detectable -- confirm in thread.c. */
#define THREAD_ID_VERSION_SHIFT 8       /* version field starts at bit 8 */
#define THREAD_ID_VERSION_MASK  0xff00  /* upper byte: version */
#define THREAD_ID_SLOT_MASK     0x00ff  /* lower byte: slot number */
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n)) /* version 1, slot n */

/* Specify current thread in a function taking an ID. */
#define THREAD_ID_CURRENT ((unsigned int)-1)
312 /* Operations to be performed just before stopping a thread and starting
313 a new one if specified before calling switch_thread */
316 TBOP_CLEAR
= 0, /* No operation to do */
317 TBOP_UNLOCK_CORELOCK
, /* Unlock a corelock variable */
318 TBOP_SWITCH_CORE
, /* Call the core switch preparation routine */
321 struct thread_blk_ops
323 struct corelock
*cl_p
; /* pointer to corelock */
324 unsigned char flags
; /* TBOP_* flags */
326 #endif /* NUM_CORES > 1 */
328 /* Information kept for each core
329 * Members are arranged for the same reason as in thread_entry
333 /* "Active" lists - core is constantly active on these and are never
334 locked and interrupts do not access them */
335 struct thread_entry
*running
; /* threads that are running (RTR) */
336 struct thread_entry
*timeout
; /* threads that are on a timeout before
338 struct thread_entry
*block_task
; /* Task going off running list */
339 #ifdef HAVE_PRIORITY_SCHEDULING
340 struct priority_distribution rtr
; /* Summary of running and ready-to-run
343 long next_tmo_check
; /* soonest time to check tmo threads */
345 struct thread_blk_ops blk_ops
; /* operations to perform when
347 struct corelock rtr_cl
; /* Lock for rtr list */
348 #endif /* NUM_CORES */
351 #ifdef HAVE_PRIORITY_SCHEDULING
352 #define IF_PRIO(...) __VA_ARGS__
353 #define IFN_PRIO(...)
356 #define IFN_PRIO(...) __VA_ARGS__
/* Macros generate better code than an inline function in this case */
360 #if (defined (CPU_PP) || defined (CPU_ARM))
362 #if CONFIG_CORELOCK == SW_CORELOCK
363 #define test_and_set(a, v, cl) \
364 xchg8((a), (v), (cl))
366 #define xchg8(a, v, cl) \
369 o = *(uint8_t *)(a); \
370 *(uint8_t *)(a) = (v); \
371 corelock_unlock(cl); \
373 #define xchg32(a, v, cl) \
376 o = *(uint32_t *)(a); \
377 *(uint32_t *)(a) = (v); \
378 corelock_unlock(cl); \
380 #define xchgptr(a, v, cl) \
381 ({ typeof (*(a)) o; \
385 corelock_unlock(cl); \
387 #elif CONFIG_CORELOCK == CORELOCK_SWAP
389 #define test_and_set(a, v, ...) \
391 #define xchg8(a, v, ...) \
394 "swpb %0, %1, [%2]" \
397 "r"((uint8_t*)(a))); \
400 #define xchg32(a, v, ...) \
405 : "r"((uint32_t)(v)), \
406 "r"((uint32_t*)(a))); \
409 #define xchgptr(a, v, ...) \
410 ({ typeof (*(a)) o; \
416 #endif /* locking selection */
417 #elif defined (CPU_COLDFIRE)
419 /* one branch will be optimized away if v is a constant expression */
420 #define test_and_set(a, v, ...) \
425 : : "a"((uint8_t*)(a)) \
430 : : "a"((uint8_t*)(a)) \
433 asm volatile ("sne.b %0" \
436 #elif CONFIG_CPU == SH7034
438 #define test_and_set(a, v, ...) \
    : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
446 "r"((uint8_t *)(a))); \
448 #endif /* CONFIG_CPU == */
450 /* defaults for no asm version */
453 #define test_and_set(a, v, ...) \
454 ({ uint32_t o = *(uint8_t *)(a); \
455 *(uint8_t *)(a) = (v); \
457 #endif /* test_and_set */
460 #define xchg8(a, v, ...) \
461 ({ uint32_t o = *(uint8_t *)(a); \
462 *(uint8_t *)(a) = (v); \
467 #define xchg32(a, v, ...) \
468 ({ uint32_t o = *(uint32_t *)(a); \
469 *(uint32_t *)(a) = (v); \
474 #define xchgptr(a, v, ...) \
475 ({ typeof (*(a)) o = *(a); \
/* Idle the calling core until work arrives -- exact low-power behavior is
 * target-specific; see the implementation */
void core_idle(void);
/* Wake a core from idle (core argument only present on multi-core builds,
 * hence the IF_COP_VOID wrapper) */
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;

/* Allocate a thread in the scheduler.
 *   function   : entry point of the new thread
 *   stack      : memory block used as the thread's stack
 *   stack_size : size of that block in bytes
 *   flags      : CREATE_THREAD_* bits (see below)
 *   name       : thread name (for debugging / thread_get_name)
 *   priority   : base priority (priority-scheduling builds only)
 *   core       : core to run on (multi-core builds only)
 * Returns the new thread's ID -- TODO(review): confirm the failure/invalid
 * return value in thread.c */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));
494 /* Set and clear the CPU frequency boost flag for the calling thread */
495 #ifdef HAVE_SCHEDULER_BOOSTCTRL
496 void trigger_cpu_boost(void);
497 void cancel_cpu_boost(void);
499 #define trigger_cpu_boost()
500 #define cancel_cpu_boost()
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread (does not return) */
void thread_exit(void);
511 #if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
512 #define ALLOW_REMOVE_THREAD
513 /* Remove a thread from the scheduler */
514 void remove_thread(unsigned int thread_id
);
/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * the next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached; timeout is presumably in ticks, matching
 * sleep_thread -- confirm in thread.c */
void block_thread_w_tmo(struct thread_entry *current, int timeout);
/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads.
 * Returns the THREAD_* flags above. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list.
 * Returns the THREAD_* flags above. */
unsigned int wakeup_thread(struct thread_entry **list);
#ifdef HAVE_PRIORITY_SCHEDULING
/* Change a thread's base priority; return value semantics (old priority vs.
 * error code) not visible here -- confirm in thread.c */
int thread_set_priority(unsigned int thread_id, int priority);
/* Query a thread's priority */
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
/* Set/query a thread's I/O priority (IO_PRIORITY_* range above) */
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
/* Migrate the calling thread to new_core (multi-core builds); return value
 * presumably the core previously run on -- TODO(review): confirm */
unsigned int switch_core(unsigned int new_core);

/* Return the ID of the calling thread */
unsigned int thread_get_current(void);
/* Debugging info - only! */
/* Stack usage of a thread / of a core's idle stack; units (bytes vs. percent)
 * not visible here -- confirm in thread.c */
int thread_stack_usage(const struct thread_entry *thread);
int idle_stack_usage(unsigned int core);
/* Copy a printable name for the thread into buffer (at most size bytes) */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
/* Profiling entry point -- presumably only compiled in profiling builds */
void profile_thread(void);
564 #endif /* THREAD_H */