/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_

#include <stdbool.h>
#include <inttypes.h>
#include "config.h"
#include "thread.h" /* struct thread_entry, struct blocker, struct corelock, IF_COP() */
/* wrap-safe macros for tick comparison */
#define TIME_AFTER(a,b)         ((long)(b) - (long)(a) < 0)
#define TIME_BEFORE(a,b)        TIME_AFTER(b,a)
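/*
 * Example (illustrative only; "deadline" is a hypothetical local):
 * wait roughly one second using wrap-safe tick comparison.
 *
 *     long deadline = current_tick + HZ;
 *     while (TIME_BEFORE(current_tick, deadline))
 *         yield();
 */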

#define HZ      100 /* number of ticks per second */

#define MAX_NUM_TICK_TASKS 8

#define MAX_NUM_QUEUES 32
#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)

/* System defined message ID's - |sign bit = 1|class|id| */
/* Event class list */
#define SYS_EVENT_CLS_QUEUE   0
#define SYS_EVENT_CLS_USB     1
#define SYS_EVENT_CLS_POWER   2
#define SYS_EVENT_CLS_FILESYS 3
#define SYS_EVENT_CLS_PLUG    4
#define SYS_EVENT_CLS_MISC    5
/* make sure SYS_EVENT_CLS_BITS has enough range */

/* Bit 31->|S|c...c|i...i| */
#define SYS_EVENT                 ((long)(int)(1 << 31))
#define SYS_EVENT_CLS_BITS        (3)
#define SYS_EVENT_CLS_SHIFT       (31-SYS_EVENT_CLS_BITS)
#define SYS_EVENT_CLS_MASK        (((1l << SYS_EVENT_CLS_BITS)-1) << SYS_EVENT_CLS_SHIFT)
#define MAKE_SYS_EVENT(cls, id)   (SYS_EVENT | ((long)(cls) << SYS_EVENT_CLS_SHIFT) | (long)(id))
/* Macros for extracting codes */
#define SYS_EVENT_CLS(e)          (((e) & SYS_EVENT_CLS_MASK) >> SYS_EVENT_CLS_SHIFT)
#define SYS_EVENT_ID(e)           ((e) & ~(SYS_EVENT|SYS_EVENT_CLS_MASK))

#define SYS_TIMEOUT               MAKE_SYS_EVENT(SYS_EVENT_CLS_QUEUE, 0)
#define SYS_USB_CONNECTED         MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 0)
#define SYS_USB_CONNECTED_ACK     MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 1)
#define SYS_USB_DISCONNECTED      MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 2)
#define SYS_USB_DISCONNECTED_ACK  MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 3)
#define SYS_POWEROFF              MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 0)
#define SYS_CHARGER_CONNECTED     MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 1)
#define SYS_CHARGER_DISCONNECTED  MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 2)
#define SYS_BATTERY_UPDATE        MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 3)
#define SYS_FS_CHANGED            MAKE_SYS_EVENT(SYS_EVENT_CLS_FILESYS, 0)
#define SYS_HOTSWAP_INSERTED      MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 0)
#define SYS_HOTSWAP_EXTRACTED     MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 1)
#define SYS_PHONE_PLUGGED         MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 2)
#define SYS_PHONE_UNPLUGGED       MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 3)
#define SYS_REMOTE_PLUGGED        MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 4)
#define SYS_REMOTE_UNPLUGGED      MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 5)
#define SYS_SCREENDUMP            MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
#define SYS_CAR_ADAPTER_RESUME    MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)

#define IS_SYSEVENT(ev)           ((ev & SYS_EVENT) == SYS_EVENT)
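/*
 * Example (illustrative only): SYS_USB_CONNECTED expands to
 * MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 0), i.e. bit 31 set, class 1 in
 * bits 30..28 and id 0 in the low bits.  Consequently
 * SYS_EVENT_CLS(SYS_USB_CONNECTED) == SYS_EVENT_CLS_USB,
 * SYS_EVENT_ID(SYS_USB_CONNECTED) == 0 and
 * IS_SYSEVENT(SYS_USB_CONNECTED) is true.
 */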

#define TIMEOUT_BLOCK   -1
#define TIMEOUT_NOBLOCK 0

struct queue_event
{
    long     id;
    intptr_t data;
};

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
struct queue_sender_list
{
    /* If non-NULL, there is a thread waiting for the corresponding event */
    /* Must be statically allocated to put in non-cached ram. */
    struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
    struct thread_entry *list;                  /* list of senders in map */
    /* Send info for last message dequeued or NULL if replied or not sent */
    struct thread_entry *curr_sender;
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker blocker;
#endif
};
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

#ifdef HAVE_PRIORITY_SCHEDULING
#define QUEUE_GET_THREAD(q) \
    (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
#else
/* Queues without priority enabled have no owner provision _at this time_ */
#define QUEUE_GET_THREAD(q) \
    (NULL)
#endif

struct event_queue
{
    struct thread_entry *queue;              /* waiter list */
    struct queue_event events[QUEUE_LENGTH]; /* list of events */
    unsigned int read;                       /* head of queue */
    unsigned int write;                      /* tail of queue */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    struct queue_sender_list *send;          /* list of threads waiting for
                                                reply to an event */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker *blocker_p;               /* priority inheritance info
                                                for sync message senders */
#endif
#endif
    IF_COP( struct corelock cl; )            /* multiprocessor sync */
};

#ifdef HAVE_PRIORITY_SCHEDULING
#define MUTEX_SET_THREAD(m, t) ((m)->blocker.thread = (t))
#define MUTEX_GET_THREAD(m)    ((m)->blocker.thread)
#else
#define MUTEX_SET_THREAD(m, t) ((m)->thread = (t))
#define MUTEX_GET_THREAD(m)    ((m)->thread)
#endif

struct mutex
{
    struct thread_entry *queue; /* waiter list */
    int count;                  /* lock owner recursion count */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker blocker;     /* priority inheritance info
                                   for waiters */
    bool no_preempt;            /* don't allow higher-priority thread
                                   to be scheduled even if woken */
#else
    struct thread_entry *thread;
#endif
    IF_COP( struct corelock cl; ) /* multiprocessor sync */
    unsigned char locked;       /* locked flag */
};

struct spinlock
{
    struct thread_entry *thread; /* lock owner */
    int count;                   /* lock owner recursion count */
    struct corelock cl;          /* multiprocessor sync */
};

#ifdef HAVE_SEMAPHORE_OBJECTS
struct semaphore
{
    struct thread_entry *queue; /* Waiter list */
    int count;                  /* # of waits remaining before unsignaled */
    int max;                    /* maximum # of waits to remain signaled */
    IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
#endif

#ifdef HAVE_EVENT_OBJECTS
struct event
{
    struct thread_entry *queues[2]; /* waiters for each state */
    unsigned char automatic;        /* event performs auto-reset */
    unsigned char state;            /* state: 1 = signaled */
    IF_COP( struct corelock cl; )   /* multiprocessor sync */
};
#endif

#ifdef HAVE_WAKEUP_OBJECTS
struct wakeup
{
    struct thread_entry *queue;   /* waiter list */
    unsigned char signalled;      /* signalled status */
    IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
#endif

/* global tick variable */
#if defined(CPU_PP) && defined(BOOTLOADER)
/* We don't enable interrupts in the iPod bootloader, so we need to fake
   the current_tick variable */
#define current_tick (signed)(USEC_TIMER/10000)
#else
extern volatile long current_tick;
#endif

#ifdef SIMULATOR
#define sleep(x) sim_sleep(x)
#endif

/* kernel functions */
extern void kernel_init(void);
extern void yield(void);
extern void sleep(int ticks);
int tick_add_task(void (*f)(void));
int tick_remove_task(void (*f)(void));
extern void tick_start(unsigned int interval_in_ms);

struct timeout;

/* timeout callback type
 * tmo - pointer to struct timeout associated with event
 */
typedef bool (* timeout_cb_type)(struct timeout *tmo);

struct timeout
{
    /* for use by callback/internal - read/write */
    timeout_cb_type callback; /* callback - returning false cancels */
    int             ticks;    /* timeout period in ticks */
    intptr_t        data;     /* data passed to callback */
    /* internal use - read-only */
    const struct timeout * const next; /* next timeout in list */
    const long      expires;           /* expiration tick */
};

void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data);
void timeout_cancel(struct timeout *tmo);
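/*
 * Example (illustrative only; poll_cb, poll_tmo and ctx are hypothetical):
 * register a periodic callback that fires every half second.
 *
 *     static struct timeout poll_tmo;
 *
 *     static bool poll_cb(struct timeout *tmo)
 *     {
 *         handle_poll((void *)tmo->data);
 *         return true;  -- keep the timeout armed; returning false cancels
 *     }
 *
 *     timeout_register(&poll_tmo, poll_cb, HZ/2, (intptr_t)ctx);
 *     ...
 *     timeout_cancel(&poll_tmo);
 */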

#define STATE_NONSIGNALED 0
#define STATE_SIGNALED    1

#define OBJ_WAIT_TIMEDOUT  (-1)
#define OBJ_WAIT_FAILED    0
#define OBJ_WAIT_SUCCEEDED 1

extern void queue_init(struct event_queue *q, bool register_queue);
extern void queue_delete(struct event_queue *q);
extern void queue_wait(struct event_queue *q, struct queue_event *ev);
extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
                             int ticks);
extern void queue_post(struct event_queue *q, long id, intptr_t data);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
extern void queue_enable_queue_send(struct event_queue *q,
                                    struct queue_sender_list *send,
                                    struct thread_entry *owner);
extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
extern void queue_reply(struct event_queue *q, intptr_t retval);
extern bool queue_in_queue_send(struct event_queue *q);
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
extern bool queue_empty(const struct event_queue *q);
extern bool queue_peek(struct event_queue *q, struct queue_event *ev);
extern void queue_clear(struct event_queue *q);
extern void queue_remove_from_head(struct event_queue *q, long id);
extern int queue_count(const struct event_queue *q);
extern int queue_broadcast(long id, intptr_t data);

extern void mutex_init(struct mutex *m);
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
#ifdef HAVE_PRIORITY_SCHEDULING
/* Temporary function to disable mutex preempting a thread on unlock */
static inline void mutex_set_preempt(struct mutex *m, bool preempt)
    { m->no_preempt = !preempt; }
#endif

extern void spinlock_init(struct spinlock *l);
extern void spinlock_lock(struct spinlock *l);
extern void spinlock_unlock(struct spinlock *l);

#ifdef HAVE_SEMAPHORE_OBJECTS
extern void semaphore_init(struct semaphore *s, int max, int start);
extern void semaphore_wait(struct semaphore *s);
extern void semaphore_release(struct semaphore *s);
#endif /* HAVE_SEMAPHORE_OBJECTS */
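/*
 * Example (illustrative only; data_sem is hypothetical): a semaphore with
 * a maximum count of 1, assumed to start unsignaled (start = 0), used to
 * wait for data produced by another thread.
 *
 *     static struct semaphore data_sem;
 *
 *     semaphore_init(&data_sem, 1, 0);
 *     semaphore_wait(&data_sem);     -- consumer blocks here
 *     semaphore_release(&data_sem);  -- producer signals
 */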

#ifdef HAVE_EVENT_OBJECTS
#define EVENT_AUTOMATIC 0x10
#define EVENT_MANUAL    0x00
extern void event_init(struct event *e, unsigned int flags);
extern void event_wait(struct event *e, unsigned int for_state);
extern void event_set_state(struct event *e, unsigned int state);
#endif /* HAVE_EVENT_OBJECTS */
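/*
 * Example (illustrative only; done_event is hypothetical; it assumes the
 * init flags combine an EVENT_* mode with an initial STATE_* value):
 *
 *     static struct event done_event;
 *
 *     event_init(&done_event, EVENT_AUTOMATIC | STATE_NONSIGNALED);
 *     event_wait(&done_event, STATE_SIGNALED);       -- waiter
 *     event_set_state(&done_event, STATE_SIGNALED);  -- signaler
 */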

#ifdef HAVE_WAKEUP_OBJECTS
extern void wakeup_init(struct wakeup *w);
extern int wakeup_wait(struct wakeup *w, int timeout);
extern int wakeup_signal(struct wakeup *w);
#endif /* HAVE_WAKEUP_OBJECTS */
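/*
 * Example (illustrative only; drv_wakeup is hypothetical; it assumes
 * wakeup_wait() reports a timeout via the OBJ_WAIT_* values above):
 *
 *     static struct wakeup drv_wakeup;
 *
 *     wakeup_init(&drv_wakeup);
 *     if (wakeup_wait(&drv_wakeup, HZ) == OBJ_WAIT_TIMEDOUT)
 *         ...                          -- no signal within one second
 *     wakeup_signal(&drv_wakeup);      -- from another thread or handler
 */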

#endif /* _KERNEL_H_ */