Remove the full url path from links to the wiki and display the wiki name only instead.
[Rockbox.git] / uisimulator / sdl / kernel.c
blobff6c94933bd2d3bd99dd2b1378544f2ab9acc93c
1 /***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
10 * Copyright (C) 2002 by Felix Arends
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "memory.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"
/* Simulated system tick counter. */
volatile long current_tick = 0;

/* Tick callback table; a NULL entry is a free slot (see tick_add_task). */
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
/* This array holds all queues that are initiated. It is used for broadcast. */
static struct event_queue *all_queues[32];
static int num_queues = 0;  /* number of live entries in all_queues */
35 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
36 /* Moves waiting thread's descriptor to the current sender when a
37 message is dequeued */
38 static void queue_fetch_sender(struct queue_sender_list *send,
39 unsigned int i)
41 struct thread_entry **spp = &send->senders[i];
43 if(*spp)
45 send->curr_sender = *spp;
46 *spp = NULL;
50 /* Puts the specified return value in the waiting thread's return value
51 and wakes the thread - a sender should be confirmed to exist first */
52 static void queue_release_sender(struct thread_entry **sender,
53 intptr_t retval)
55 (*sender)->retval = retval;
56 wakeup_thread(sender);
57 if(*sender != NULL)
59 fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
60 exit(-1);
64 /* Releases any waiting threads that are queued with queue_send -
65 reply with NULL */
66 static void queue_release_all_senders(struct event_queue *q)
68 if(q->send)
70 unsigned int i;
71 for(i = q->read; i != q->write; i++)
73 struct thread_entry **spp =
74 &q->send->senders[i & QUEUE_LENGTH_MASK];
75 if(*spp)
77 queue_release_sender(spp, 0);
83 /* Enables queue_send on the specified queue - caller allocates the extra
84 data structure */
85 void queue_enable_queue_send(struct event_queue *q,
86 struct queue_sender_list *send)
88 q->send = NULL;
89 if(send)
91 q->send = send;
92 memset(send, 0, sizeof(*send));
95 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
97 void queue_init(struct event_queue *q, bool register_queue)
99 q->read = 0;
100 q->write = 0;
101 q->thread = NULL;
102 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
103 q->send = NULL; /* No message sending by default */
104 #endif
106 if(register_queue)
108 if(num_queues >= 32)
110 fprintf(stderr, "queue_init->out of queues");
111 exit(-1);
113 /* Add it to the all_queues array */
114 all_queues[num_queues++] = q;
118 void queue_delete(struct event_queue *q)
120 int i;
121 bool found = false;
123 /* Find the queue to be deleted */
124 for(i = 0;i < num_queues;i++)
126 if(all_queues[i] == q)
128 found = true;
129 break;
133 if(found)
135 /* Move the following queues up in the list */
136 for(;i < num_queues-1;i++)
138 all_queues[i] = all_queues[i+1];
141 num_queues--;
144 /* Release threads waiting on queue head */
145 wakeup_thread(&q->thread);
147 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
148 /* Release waiting threads and reply to any dequeued message
149 waiting for one. */
150 queue_release_all_senders(q);
151 queue_reply(q, 0);
152 #endif
154 q->read = 0;
155 q->write = 0;
158 void queue_wait(struct event_queue *q, struct event *ev)
160 unsigned int rd;
162 if (q->read == q->write)
164 block_thread(&q->thread);
167 rd = q->read++ & QUEUE_LENGTH_MASK;
168 *ev = q->events[rd];
170 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
171 if(q->send && q->send->senders[rd])
173 /* Get data for a waiting thread if one */
174 queue_fetch_sender(q->send, rd);
176 #endif
179 void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
181 if (q->read == q->write && ticks > 0)
183 block_thread_w_tmo(&q->thread, ticks);
186 if(q->read != q->write)
188 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
189 *ev = q->events[rd];
191 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
192 if(q->send && q->send->senders[rd])
194 /* Get data for a waiting thread if one */
195 queue_fetch_sender(q->send, rd);
197 #endif
199 else
201 ev->id = SYS_TIMEOUT;
205 void queue_post(struct event_queue *q, long id, intptr_t data)
207 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
209 q->events[wr].id = id;
210 q->events[wr].data = data;
212 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
213 if(q->send)
215 struct thread_entry **spp = &q->send->senders[wr];
217 if(*spp)
219 /* overflow protect - unblock any thread waiting at this index */
220 queue_release_sender(spp, 0);
223 #endif
225 wakeup_thread(&q->thread);
/* Special thread-synced queue_post for button driver or any other preemptive sim thread */
void queue_syncpost(struct event_queue *q, long id, intptr_t data)
{
    thread_sdl_lock();
    /* No rockbox threads can be running here */
    queue_post(q, id, data);
    thread_sdl_unlock();
}
237 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
238 intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
240 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
242 q->events[wr].id = id;
243 q->events[wr].data = data;
245 if(q->send)
247 struct thread_entry **spp = &q->send->senders[wr];
249 if(*spp)
251 /* overflow protect - unblock any thread waiting at this index */
252 queue_release_sender(spp, 0);
255 wakeup_thread(&q->thread);
257 block_thread(spp);
258 return thread_get_current()->retval;
261 /* Function as queue_post if sending is not enabled */
262 return 0;
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif
273 /* Replies with retval to any dequeued message sent with queue_send */
274 void queue_reply(struct event_queue *q, intptr_t retval)
276 if(q->send && q->send->curr_sender)
278 queue_release_sender(&q->send->curr_sender, retval);
281 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
283 bool queue_empty(const struct event_queue* q)
285 return ( q->read == q->write );
288 void queue_clear(struct event_queue* q)
290 /* fixme: This is potentially unsafe in case we do interrupt-like processing */
291 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
292 /* Release all thread waiting in the queue for a reply -
293 dequeued sent message will be handled by owning thread */
294 queue_release_all_senders(q);
295 #endif
296 q->read = 0;
297 q->write = 0;
300 void queue_remove_from_head(struct event_queue *q, long id)
302 while(q->read != q->write)
304 unsigned int rd = q->read & QUEUE_LENGTH_MASK;
306 if(q->events[rd].id != id)
308 break;
311 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
312 if(q->send)
314 struct thread_entry **spp = &q->send->senders[rd];
316 if(*spp)
318 /* Release any thread waiting on this message */
319 queue_release_sender(spp, 0);
322 #endif
323 q->read++;
327 int queue_count(const struct event_queue *q)
329 return q->write - q->read;
332 int queue_broadcast(long id, intptr_t data)
334 int i;
336 for(i = 0;i < num_queues;i++)
338 queue_post(all_queues[i], id, data);
341 return num_queues;
/* Special thread-synced queue_broadcast for button driver or any other preemptive sim thread */
int queue_syncbroadcast(long id, intptr_t data)
{
    int count;

    thread_sdl_lock();
    /* No rockbox threads can be running here */
    count = queue_broadcast(id, data);
    thread_sdl_unlock();

    return count;
}
/* Cooperatively hand the CPU to another simulated thread. */
void yield(void)
{
    switch_thread(true, NULL);
}
/* Suspend the current simulated thread for the given number of ticks. */
void sleep(int ticks)
{
    sleep_thread(ticks);
}
365 void sim_tick_tasks(void)
367 int i;
369 /* Run through the list of tick tasks */
370 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
372 if(tick_funcs[i])
374 tick_funcs[i]();
379 int tick_add_task(void (*f)(void))
381 int i;
383 /* Add a task if there is room */
384 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
386 if(tick_funcs[i] == NULL)
388 tick_funcs[i] = f;
389 return 0;
392 fprintf(stderr, "Error! tick_add_task(): out of tasks");
393 exit(-1);
394 return -1;
397 int tick_remove_task(void (*f)(void))
399 int i;
401 /* Remove a task if it is there */
402 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
404 if(tick_funcs[i] == f)
406 tick_funcs[i] = NULL;
407 return 0;
411 return -1;
414 /* Very simple mutex simulation - won't work with pre-emptive
415 multitasking, but is better than nothing at all */
416 void mutex_init(struct mutex *m)
418 m->thread = NULL;
419 m->locked = 0;
422 void mutex_lock(struct mutex *m)
424 if (test_and_set(&m->locked, 1))
426 block_thread(&m->thread);
430 void mutex_unlock(struct mutex *m)
432 if (m->thread != NULL)
434 wakeup_thread(&m->thread);
436 else
438 m->locked = 0;
442 void spinlock_lock(struct mutex *l)
444 while(test_and_set(&l->locked, 1))
446 switch_thread(true, NULL);
450 void spinlock_unlock(struct mutex *l)
452 l->locked = 0;