/* Rockbox: uisimulator/sdl/kernel.c */
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdio.h>  /* fprintf */
#include <stdlib.h>
#include <string.h> /* memset */
#include <SDL.h>
#include <SDL_thread.h>
#include "memory.h"
#include "system-sdl.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"
/* Condition to signal that "interrupts" may proceed */
static SDL_cond *sim_thread_cond;
/* Mutex to serialize changing levels and exclude other threads while
 * inside a handler */
static SDL_mutex *sim_irq_mtx;
static int interrupt_level = HIGHEST_IRQ_LEVEL;
static int handlers_pending = 0;
static int status_reg = 0;

extern struct core_entry cores[NUM_CORES];
/* Necessary logic:
 * 1) All threads must pass unblocked
 * 2) Current handler must always pass unblocked
 * 3) Threads must be excluded when irq routine is running
 * 4) No more than one handler routine should execute at a time
 */
int set_irq_level(int level)
{
    SDL_LockMutex(sim_irq_mtx);

    int oldlevel = interrupt_level;

    if (status_reg == 0 && level == 0 && oldlevel != 0)
    {
        /* Not in a handler and "interrupts" are being reenabled */
        if (handlers_pending > 0)
            SDL_CondSignal(sim_thread_cond);
    }

    interrupt_level = level; /* save new level */

    SDL_UnlockMutex(sim_irq_mtx);
    return oldlevel;
}
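/* Callers save the returned level and restore it when done, as the rest
   of this file does - a minimal sketch of the disable/work/restore
   pattern:

       int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
       ... touch state shared with simulated interrupt handlers ...
       set_irq_level(oldlevel);
*/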
void sim_enter_irq_handler(void)
{
    SDL_LockMutex(sim_irq_mtx);
    handlers_pending++;

    if(interrupt_level != 0)
    {
        /* "Interrupts" are disabled. Wait for reenable */
        SDL_CondWait(sim_thread_cond, sim_irq_mtx);
    }

    status_reg = 1;
}

void sim_exit_irq_handler(void)
{
    if (--handlers_pending > 0)
        SDL_CondSignal(sim_thread_cond);

    status_reg = 0;
    SDL_UnlockMutex(sim_irq_mtx);
}
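/* A sketch of how a simulated interrupt source is expected to bracket
   its work with the pair above (sim_timer_cb is a hypothetical name;
   the actual caller lives outside this file):

       void sim_timer_cb(void)
       {
           sim_enter_irq_handler();
           current_tick++;
           sim_tick_tasks();
           sim_exit_irq_handler();
       }
*/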
bool sim_kernel_init(void)
{
    sim_irq_mtx = SDL_CreateMutex();
    if (sim_irq_mtx == NULL)
    {
        fprintf(stderr, "Cannot create sim_irq_mtx\n");
        return false;
    }

    sim_thread_cond = SDL_CreateCond();
    if (sim_thread_cond == NULL)
    {
        fprintf(stderr, "Cannot create sim_thread_cond\n");
        return false;
    }

    return true;
}

void sim_kernel_shutdown(void)
{
    SDL_DestroyMutex(sim_irq_mtx);
    SDL_DestroyCond(sim_thread_cond);
}

volatile long current_tick = 0;
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all registered queues. It is used for broadcast. */
static struct event_queue *all_queues[MAX_NUM_QUEUES];
static int num_queues = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    if(*sender != NULL)
    {
        fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
        exit(-1);
    }
}
/* Releases any waiting threads that are queued with queue_send -
   reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    q->send = NULL;
    if(send)
    {
        q->send = send;
        memset(send, 0, sizeof(*send));
    }
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    q->read = 0;
    q->write = 0;
    thread_queue_init(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(num_queues >= MAX_NUM_QUEUES)
        {
            fprintf(stderr, "queue_init->out of queues");
            exit(-1);
        }
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
    }

    set_irq_level(oldlevel);
}

void queue_delete(struct event_queue *q)
{
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    {
        if(all_queues[i] == q)
        {
            found = true;
            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        }
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    set_irq_level(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    set_irq_level(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread(&q->queue);

    set_irq_level(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->queue);

        block_thread_no_listlock(spp);
        return thread_get_current()->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);
    set_irq_level(oldlevel);
    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
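/* A minimal sketch of the send/reply protocol above. Not compiled in;
   MY_MSG, my_queue and both thread functions are hypothetical names. */
#if 0
#define MY_MSG 1

static struct event_queue my_queue;
static struct queue_sender_list my_queue_senders;

static void owner_thread(void)
{
    struct queue_event ev;

    queue_init(&my_queue, true);
    queue_enable_queue_send(&my_queue, &my_queue_senders);

    while(1)
    {
        queue_wait(&my_queue, &ev);
        /* handle ev.id / ev.data, then unblock the sender; a message
           left unreplied is auto-replied with 0 on the next wait */
        queue_reply(&my_queue, 1);
    }
}

static void client_thread(void)
{
    /* blocks until owner_thread dequeues the message and replies */
    intptr_t result = queue_send(&my_queue, MY_MSG, 0);
    (void)result;
}
#endif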
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if (q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    set_irq_level(oldlevel);

    return have_msg;
}
void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       a dequeued sent message will be handled by the owning thread */
    queue_release_all_senders(q);
#endif
    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    for(i = 0;i < num_queues;i++)
    {
        queue_post(all_queues[i], id, data);
    }

    set_irq_level(oldlevel);
    return num_queues;
}
void yield(void)
{
    switch_thread(NULL);
}

void sleep(int ticks)
{
    sleep_thread(ticks);
}

void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}
int tick_add_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    fprintf(stderr, "Error! tick_add_task(): out of tasks");
    exit(-1);
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}
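/* Tick tasks registered above run in (simulated) interrupt context once
   per kernel tick, so they must stay short - a minimal sketch
   (my_tick_task and my_counter are hypothetical names):

       static volatile long my_counter;

       static void my_tick_task(void)
       {
           my_counter++;
       }

       tick_add_task(my_tick_task);
       ...
       tick_remove_task(my_tick_task);
*/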
/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
}

void mutex_lock(struct mutex *m)
{
    struct thread_entry *const thread = thread_get_current();

    if(thread == m->thread)
    {
        m->count++;
        return;
    }

    if (!test_and_set(&m->locked, 1))
    {
        m->thread = thread;
        return;
    }

    block_thread_no_listlock(&m->queue);
}
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    if(m->thread != thread_get_current())
    {
        fprintf(stderr, "mutex_unlock->wrong thread");
        exit(-1);
    }

    if (m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    m->thread = wakeup_thread_no_listlock(&m->queue);

    if (m->thread == NULL)
    {
        /* release lock */
        m->locked = 0;
    }
}
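/* The lock is recursive for its owner: each extra mutex_lock by the
   owning thread bumps m->count and must be paired with a mutex_unlock -
   a minimal sketch (my_mutex is a hypothetical name):

       static struct mutex my_mutex;

       mutex_init(&my_mutex);
       mutex_lock(&my_mutex);    acquires, count stays 0
       mutex_lock(&my_mutex);    owner re-enters, count becomes 1
       mutex_unlock(&my_mutex);  count drops back to 0, still owned
       mutex_unlock(&my_mutex);  released, or handed to a waiter
*/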
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    if(max <= 0 || start < 0 || start > max)
    {
        fprintf(stderr, "semaphore_init->inv arg");
        exit(-1);
    }
    s->queue = NULL;
    s->max = max;
    s->count = start;
}

void semaphore_wait(struct semaphore *s)
{
    if(--s->count >= 0)
        return;
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
    if(s->count < s->max)
    {
        if(++s->count <= 0)
        {
            if(s->queue == NULL)
            {
                /* there should be threads in this queue */
                fprintf(stderr, "semaphore->wakeup");
                exit(-1);
            }
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
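/* A minimal counting-semaphore sketch for the pair above (my_sem is a
   hypothetical name; one slot, initially available):

       static struct semaphore my_sem;

       semaphore_init(&my_sem, 1, 1);
       semaphore_wait(&my_sem);       count drops to 0, caller proceeds
       ... exclusive work ...
       semaphore_release(&my_sem);    count returns to 1, or a blocked
                                      waiter is woken instead
*/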
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
}
void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state = e->state;

    if(e->automatic != 0)
    {
        /* a wait for nonsignaled is satisfied by definition, and a wait
           for signaled is satisfied if the event is currently signaled */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
        return;
    }

    /* current state does not match wait-for state */
    block_thread_no_listlock(&e->queues[for_state]);
}
void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state = e->state;

    if(last_state == state)
    {
        /* no change */
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;

            if(e->queues[STATE_NONSIGNALED] != NULL)
            {
                /* no thread should have ever blocked for nonsignaled */
                fprintf(stderr, "set_event_state->queue[NS]:S");
                exit(-1);
            }

            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */
        if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
        {
            /* no thread should have ever blocked */
            fprintf(stderr, "set_event_state->queue[NS]:NS");
            exit(-1);
        }

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }
}
#endif /* HAVE_EVENT_OBJECTS */
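/* A minimal sketch of the two event flavours above (my_event is a
   hypothetical name):

       static struct event my_event;

       event_init(&my_event, EVENT_AUTOMATIC);
       ... waiter:    event_wait(&my_event, STATE_SIGNALED);
       ... signaller: event_set_state(&my_event, STATE_SIGNALED);

   With EVENT_AUTOMATIC each signal is a "pulse": it passes to at most
   one waiter and the event reverts to nonsignaled (or stays signaled
   until the next wait if no thread was blocked). Without it, a signal
   wakes every thread waiting for STATE_SIGNALED and the state sticks
   until changed again. */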