/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high priority threads more CPU time than lower priority threads
 * when they need it. Priority is differential: the priority difference
 * between a lower priority runnable thread and the highest priority
 * runnable thread determines how much aging the lower priority thread
 * needs before it is scheduled, which prevents starvation.
 *
 * If the software playback codec's PCM buffer is running critically low,
 * the codec can gradually raise its own priority to override the user
 * interface and prevent playback skipping.
 */
#define PRIORITY_RESERVED_HIGH   0   /* Reserved */
#define PRIORITY_RESERVED_LOW    32  /* Reserved */
#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
#define LOWEST_PRIORITY          31  /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1      1
#define PRIORITY_REALTIME_2      2
#define PRIORITY_REALTIME_3      3
#define PRIORITY_REALTIME_4      4
#define PRIORITY_REALTIME        4   /* Lowest realtime range */
#define PRIORITY_BUFFERING       15  /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE  16  /* The main thread */
#define PRIORITY_RECORDING       16  /* Recording thread */
#define PRIORITY_PLAYBACK        16  /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5   /* Maximum allowable playback priority */
#define PRIORITY_SYSTEM          18  /* All other firmware threads */
#define PRIORITY_BACKGROUND      20  /* Normal application threads */
#define NUM_PRIORITIES           32
#define PRIORITY_IDLE            32  /* Priority representative of no tasks */

#define IO_PRIORITY_IMMEDIATE    0
#define IO_PRIORITY_BACKGROUND   32
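
/* Example (illustrative sketch): note that lower numbers mean higher
 * priority. A playback codec might map PCM buffer fill to a priority
 * between PRIORITY_PLAYBACK (buffer healthy) and PRIORITY_PLAYBACK_MAX
 * (buffer nearly empty); pcmbuf_fill_percent() and codec_id are
 * hypothetical names, not part of this API:
 *
 *     int prio = PRIORITY_PLAYBACK
 *              - (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX)
 *                * (100 - pcmbuf_fill_percent()) / 100;
 *     thread_set_priority(codec_id, prio); // 16 when full, 5 when empty
 */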
#if CONFIG_CODEC == SWCODEC

#ifdef HAVE_RECORDING
#ifdef HAVE_HARDWARE_CLICK
#define BASETHREADS  18
#else
#define BASETHREADS  17
#endif
#else
#ifdef HAVE_HARDWARE_CLICK
#define BASETHREADS  17
#else
#define BASETHREADS  16
#endif
#endif

#else
#define BASETHREADS  11
#endif /* CONFIG_CODEC == * */
#ifndef TARGET_EXTRA_THREADS
#define TARGET_EXTRA_THREADS 0
#endif

#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
/*
 * More stack is needed when running hosted, perhaps because host C library
 * functions are more expensive; the simulator does not necessarily model
 * stack usage anyway. */
#ifdef HAVE_SIGALTSTACK_THREADS
/* MINSIGSTKSZ for the OS to deliver the signal + 0x3000 for us */
#define DEFAULT_STACK_SIZE (MINSIGSTKSZ+0x3000) /* Bytes */
#elif (CONFIG_PLATFORM & PLATFORM_ANDROID) || defined(HAVE_WIN32_FIBER_THREADS)
#define DEFAULT_STACK_SIZE 0x1000 /* Bytes */
#else /* native threads, sdl threads */
#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
#endif
#if defined(ASSEMBLER_THREADS)
/* Need to keep structures inside the header file because debug_menu
 * needs them. */
#ifdef CPU_COLDFIRE
struct regs
{
    uint32_t macsr; /*  0 - EMAC status register */
    uint32_t d[6];  /*  4-24 - d2-d7 */
    uint32_t a[5];  /* 28-44 - a2-a6 */
    uint32_t sp;    /* 48 - Stack pointer (a7) */
    uint32_t start; /* 52 - Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
    uint32_t r[7];  /*  0-24 - Registers r8 thru r14 */
    uint32_t sp;    /* 28 - Stack pointer (r15) */
    uint32_t pr;    /* 32 - Procedure register */
    uint32_t start; /* 36 - Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
    uint32_t r[8];  /*  0-28 - Registers r4-r11 */
    uint32_t sp;    /* 32 - Stack pointer (r13) */
    uint32_t lr;    /* 36 - r14 (lr) */
    uint32_t start; /* 40 - Thread start address, or NULL when started */
};
#elif defined(CPU_MIPS)
struct regs
{
    uint32_t r[9];  /*  0-32 - Registers s0-s7, fp */
    uint32_t sp;    /* 36 - Stack pointer */
    uint32_t ra;    /* 40 - Return address */
    uint32_t start; /* 44 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
#elif (CONFIG_PLATFORM & PLATFORM_HOSTED) || defined(__PCTOOL__)
#ifndef HAVE_SDL_THREADS
struct regs
{
    void (*start)(void); /* thread's entry point, or NULL when started */
    void* uc;            /* host thread handle */
    uintptr_t sp;        /* Stack pointer, unused */
    size_t stack_size;   /* stack size, not always used */
    uintptr_t stack;     /* pointer to start of the stack buffer */
};
#else /* SDL threads */
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};
#endif
#endif /* ASSEMBLER_THREADS || PLATFORM_HOSTED */
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
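
/* Typical usage (illustrative sketch; 'shared_cl' and the protected state
 * are hypothetical). On builds without HAVE_CORELOCK_OBJECT these calls
 * compile away via the no-op macros defined further below:
 *
 *     static struct corelock shared_cl;
 *
 *     corelock_init(&shared_cl);   // once, before first use
 *     ...
 *     corelock_lock(&shared_cl);   // spins until the lock is held
 *     // ... touch state shared between cores ...
 *     corelock_unlock(&shared_cl);
 */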
/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily a kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};
#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry * volatile thread; /* thread blocking other threads
                                              (aka. object owner) */
    int priority;                          /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);

struct priority_distribution
{
    uint8_t  hist[NUM_PRIORITIES]; /* Histogram: frequency of each priority */
    uint32_t mask;                 /* Bitmask of hist entries that are not zero */
};
#endif /* HAVE_PRIORITY_SCHEDULING */
/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
    unsigned char core;        /* The core to which thread belongs */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                  performs special steps needed when being
                                  forced off of an object's wait queue that
                                  go beyond the standard wait queue removal
                                  and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
    NUM_CORES > 1
    volatile intptr_t retval;  /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    uint16_t id;               /* Current slot id */
    unsigned short stack_size; /* Size of stack in bytes */
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority;
#endif
};
/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
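
/* Example (illustrative): a thread id packs a slot index in the low byte
 * and a version in the high byte, so the two fields separate as:
 *
 *     unsigned int slot = id & THREAD_ID_SLOT_MASK;
 *     unsigned int ver  = (id & THREAD_ID_VERSION_MASK)
 *                             >> THREAD_ID_VERSION_SHIFT;
 *
 * THREAD_ID_INIT(3) == 0x0103 (version 1, slot 3); the version changes
 * when a slot is reused, so ids held by stale references stop matching. */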
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char   flags; /* TBOP_* flags */
};
#endif /* HAVE_CORELOCK_OBJECT */
/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - the core works on these constantly; they are never
       locked and interrupts do not access them */
    struct thread_entry *running; /* threads that are running (RTR) */
    struct thread_entry *timeout; /* threads that are on a timeout before
                                     running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check; /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* HAVE_CORELOCK_OBJECT */
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;
/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));
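
/* Example (illustrative sketch): creating a thread frozen and releasing it
 * later with thread_thaw(). 'demo_thread' and its stack are hypothetical;
 * CPU (the main core id) and the IF_PRIO/IF_COP argument wrappers come
 * from the target configuration:
 *
 *     static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *     static void demo_thread(void) { for (;;) sleep_thread(HZ); }
 *
 *     unsigned int id =
 *         create_thread(demo_thread, demo_stack, sizeof(demo_stack),
 *                       CREATE_THREAD_FROZEN, "demo"
 *                       IF_PRIO(, PRIORITY_BACKGROUND)
 *                       IF_COP(, CPU));
 *     ...
 *     thread_thaw(id); // thread becomes runnable here
 */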
/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
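
/* Example (illustrative): bracketing a burst of CPU-heavy work; on targets
 * without HAVE_SCHEDULER_BOOSTCTRL both calls compile to no-ops:
 *
 *     trigger_cpu_boost();  // raise this thread's boost flag
 *     // ... time-critical work ...
 *     cancel_cpu_boost();   // drop the boost flag again
 */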
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif
/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);
/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);
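
/* Rough sketch of how kernel objects pair the block/wake primitives
 * (modeled on the patterns in kernel.c; corelocking, IRQ masking and the
 * hypothetical 'obj_queue' list are simplified away):
 *
 *     // waiter side:
 *     struct thread_entry *current = thread_self_entry();
 *     current->bqp = &obj_queue;  // list we are about to block on
 *     block_thread(current);      // enter STATE_BLOCKED on that list
 *     switch_thread();            // yield; returns after being woken
 *
 *     // waker side:
 *     if (wakeup_thread(&obj_queue) & THREAD_SWITCH)
 *         switch_thread();        // a higher-priority thread was woken
 */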
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
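
/* Example (illustrative, assuming thread_set_priority() returns the
 * priority that was previously set, as its int return suggests):
 *
 *     int old = thread_set_priority(thread_self(), PRIORITY_REALTIME);
 *     // ... latency-sensitive section ...
 *     thread_set_priority(thread_self(), old); // restore
 */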
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
/* Return the id of the calling thread. */
unsigned int thread_self(void);

/* Return the thread_entry for the calling thread.
 * INTERNAL: Intended for use by kernel and not for programs. */
struct thread_entry* thread_self_entry(void);
/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
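
/* Example (illustrative): inspecting the calling thread, as a debug menu
 * might do:
 *
 *     char name[32];
 *     struct thread_entry *self = thread_self_entry();
 *     thread_get_name(name, sizeof(name), self);
 *     int usage = thread_stack_usage(self); // how much stack is used
 */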
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */