Refer to "disk" instead of "hard disk" for the dap as flash based daps don't have...
[Rockbox.git] / uisimulator / sdl / kernel.c
blob e01fbe65b9501f5e2b85e0185fe173fffa27e8ee
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Felix Arends
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
20 #include <stdlib.h>
21 #include "memory.h"
22 #include "uisdl.h"
23 #include "kernel.h"
24 #include "thread-sdl.h"
25 #include "thread.h"
26 #include "debug.h"
/* Registered tick callbacks, invoked on every simulated timer tick by
   sim_tick_tasks(); unused slots are NULL. */
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
/* Simulated interrupt-level control. The simulator has no real
   interrupts; the level is merely recorded so the save/restore idiom
   used throughout this file (old = set_irq_level(15<<4); ...;
   set_irq_level(old)) round-trips correctly.

   Returns the PREVIOUS level, matching the target implementations.
   (The original returned the newly-set level, which made the restore
   call re-apply the masked level instead of the caller's level.) */
int set_irq_level (int level)
{
    static int _lv = 0;
    int old_level = _lv;
    _lv = level;
    return old_level;
}
36 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
37 /* Moves waiting thread's descriptor to the current sender when a
38 message is dequeued */
39 static void queue_fetch_sender(struct queue_sender_list *send,
40 unsigned int i)
42 int old_level = set_irq_level(15<<4);
43 struct queue_sender **spp = &send->senders[i];
45 if(*spp)
47 send->curr_sender = *spp;
48 *spp = NULL;
51 set_irq_level(old_level);
54 /* Puts the specified return value in the waiting thread's return value
55 and wakes the thread - a sender should be confirmed to exist first */
56 static void queue_release_sender(struct queue_sender **sender,
57 intptr_t retval)
59 (*sender)->retval = retval;
60 *sender = NULL;
63 /* Releases any waiting threads that are queued with queue_send -
64 reply with NULL */
65 static void queue_release_all_senders(struct event_queue *q)
67 if(q->send)
69 unsigned int i;
70 for(i = q->read; i != q->write; i++)
72 struct queue_sender **spp =
73 &q->send->senders[i & QUEUE_LENGTH_MASK];
74 if(*spp)
76 queue_release_sender(spp, 0);
82 /* Enables queue_send on the specified queue - caller allocates the extra
83 data structure */
84 void queue_enable_queue_send(struct event_queue *q,
85 struct queue_sender_list *send)
87 q->send = send;
88 memset(send, 0, sizeof(*send));
90 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
92 void queue_init(struct event_queue *q, bool register_queue)
94 (void)register_queue;
96 q->read = 0;
97 q->write = 0;
98 q->thread = NULL;
99 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
100 q->send = NULL; /* No message sending by default */
101 #endif
/* No per-queue resources exist in the simulator - nothing to free. */
void queue_delete(struct event_queue *q)
{
    (void)q;
}
109 void queue_wait(struct event_queue *q, struct event *ev)
111 unsigned int rd;
113 while(q->read == q->write)
115 switch_thread(true, NULL);
118 rd = q->read++ & QUEUE_LENGTH_MASK;
119 *ev = q->events[rd];
121 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
122 if(q->send && q->send->senders[rd])
124 /* Get data for a waiting thread if one */
125 queue_fetch_sender(q->send, rd);
127 #endif
130 void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
132 unsigned int timeout = current_tick + ticks;
134 while(q->read == q->write && TIME_BEFORE( current_tick, timeout ))
136 sim_sleep(1);
139 if(q->read != q->write)
141 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
142 *ev = q->events[rd];
144 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
145 if(q->send && q->send->senders[rd])
147 /* Get data for a waiting thread if one */
148 queue_fetch_sender(q->send, rd);
150 #endif
152 else
154 ev->id = SYS_TIMEOUT;
158 void queue_post(struct event_queue *q, long id, intptr_t data)
160 int oldlevel = set_irq_level(15<<4);
161 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
163 q->events[wr].id = id;
164 q->events[wr].data = data;
166 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
167 if(q->send)
169 struct queue_sender **spp = &q->send->senders[wr];
171 if(*spp)
173 /* overflow protect - unblock any thread waiting at this index */
174 queue_release_sender(spp, 0);
177 #endif
179 set_irq_level(oldlevel);
182 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
183 intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
185 int oldlevel = set_irq_level(15<<4);
186 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
188 q->events[wr].id = id;
189 q->events[wr].data = data;
191 if(q->send)
193 struct queue_sender **spp = &q->send->senders[wr];
194 struct queue_sender sender;
196 if(*spp)
198 /* overflow protect - unblock any thread waiting at this index */
199 queue_release_sender(spp, 0);
202 *spp = &sender;
204 set_irq_level(oldlevel);
205 while (*spp != NULL)
207 switch_thread(true, NULL);
210 return sender.retval;
213 /* Function as queue_post if sending is not enabled */
214 set_irq_level(oldlevel);
215 return 0;
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return (q->send != NULL) && (q->send->curr_sender != NULL);
}
#endif
226 /* Replies with retval to any dequeued message sent with queue_send */
227 void queue_reply(struct event_queue *q, intptr_t retval)
229 if(q->send && q->send->curr_sender)
231 queue_release_sender(&q->send->curr_sender, retval);
234 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
236 bool queue_empty(const struct event_queue* q)
238 return ( q->read == q->write );
241 void queue_clear(struct event_queue* q)
243 /* fixme: This is potentially unsafe in case we do interrupt-like processing */
244 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
245 /* Release all thread waiting in the queue for a reply -
246 dequeued sent message will be handled by owning thread */
247 queue_release_all_senders(q);
248 #endif
249 q->read = 0;
250 q->write = 0;
253 void queue_remove_from_head(struct event_queue *q, long id)
255 int oldlevel = set_irq_level(15<<4);
257 while(q->read != q->write)
259 unsigned int rd = q->read & QUEUE_LENGTH_MASK;
261 if(q->events[rd].id != id)
263 break;
266 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
267 if(q->send)
269 struct queue_sender **spp = &q->send->senders[rd];
271 if(*spp)
273 /* Release any thread waiting on this message */
274 queue_release_sender(spp, 0);
277 #endif
278 q->read++;
281 set_irq_level(oldlevel);
/* The simulator's scheduler does the real work; both arguments are
   meaningless here and we simply yield the CPU. */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
    (void)save_context;
    (void)blocked_list;

    yield();
}
292 void sim_tick_tasks(void)
294 int i;
296 /* Run through the list of tick tasks */
297 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
299 if(tick_funcs[i])
301 tick_funcs[i]();
306 int tick_add_task(void (*f)(void))
308 int i;
310 /* Add a task if there is room */
311 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
313 if(tick_funcs[i] == NULL)
315 tick_funcs[i] = f;
316 return 0;
319 DEBUGF("Error! tick_add_task(): out of tasks");
320 return -1;
323 int tick_remove_task(void (*f)(void))
325 int i;
327 /* Remove a task if it is there */
328 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
330 if(tick_funcs[i] == f)
332 tick_funcs[i] = NULL;
333 return 0;
337 return -1;
340 /* Very simple mutex simulation - won't work with pre-emptive
341 multitasking, but is better than nothing at all */
342 void mutex_init(struct mutex *m)
344 m->locked = false;
347 void mutex_lock(struct mutex *m)
349 while(m->locked)
350 switch_thread(true, NULL);
351 m->locked = true;
354 void mutex_unlock(struct mutex *m)
356 m->locked = false;
359 void spinlock_lock(struct mutex *m)
361 while(m->locked)
362 switch_thread(true, NULL);
363 m->locked = true;
366 void spinlock_unlock(struct mutex *m)
368 m->locked = false;