/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdio.h>      /* fprintf */
#include <stdlib.h>
#include <string.h>     /* memset */
#include <SDL.h>
#include <SDL_thread.h>
#include "memory.h"
#include "system-sdl.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"

/* Prevent the "irq handler" from running concurrently with normal threads,
 * and prevent concurrent entry by multiple handlers */
static SDL_cond *sim_thread_cond;
/* Protect the sim irq state while it is being changed */
static SDL_mutex *sim_irq_mtx;
static int interrupt_level = HIGHEST_IRQ_LEVEL;
static int status_reg = 0;

extern struct core_entry cores[NUM_CORES];

/* Necessary logic:
 * 1) All threads must pass unblocked
 * 2) Current handler must always pass unblocked
 * 3) Threads must be excluded when irq routine is running
 * 4) No more than one handler routine should execute at a time
 */
int set_irq_level(int level)
{
    SDL_LockMutex(sim_irq_mtx);

    int oldlevel = interrupt_level;

    if (status_reg == 0 && level == 0 && oldlevel != 0)
    {
        /* Not in a handler and "interrupts" are being reenabled */
        SDL_CondSignal(sim_thread_cond);
    }

    interrupt_level = level; /* save new level */

    SDL_UnlockMutex(sim_irq_mtx);
    return oldlevel;
}

void sim_enter_irq_handler(void)
{
    SDL_LockMutex(sim_irq_mtx);

    if(interrupt_level != 0)
    {
        /* "Interrupts" are disabled. Wait for reenable */
        SDL_CondWait(sim_thread_cond, sim_irq_mtx);
    }

    status_reg = 1;
}

void sim_exit_irq_handler(void)
{
    status_reg = 0;
    SDL_UnlockMutex(sim_irq_mtx);
}

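/* Illustrative sketch (not part of the original file): code that simulates an
 * interrupt - for example a periodic SDL timer callback elsewhere in the
 * simulator - would be expected to bracket its work with
 * sim_enter_irq_handler() and sim_exit_irq_handler(), so it is held off while
 * "interrupts" are disabled and excludes normal threads while it runs. The
 * callback name and its use of an SDL timer are assumptions.
 */
#if 0
static Uint32 example_tick_callback(Uint32 interval, void *param)
{
    (void)param;

    sim_enter_irq_handler();    /* waits until interrupt_level == 0 */

    /* "interrupt" work goes here, e.g. advancing the tick counter and
       running the registered tick tasks */
    current_tick++;
    sim_tick_tasks();

    sim_exit_irq_handler();     /* clear status_reg, release sim_irq_mtx */

    return interval;            /* keep the SDL timer running */
}
#endif
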
bool sim_kernel_init(void)
{
    sim_irq_mtx = SDL_CreateMutex();
    if (sim_irq_mtx == NULL)
    {
        fprintf(stderr, "Cannot create sim_irq_mtx\n");
        return false;
    }

    /* Threads wait on this condition while "interrupts" are disabled;
       they start out disabled (see interrupt_level above) */
    sim_thread_cond = SDL_CreateCond();
    if (sim_thread_cond == NULL)
    {
        fprintf(stderr, "Cannot create sim_thread_cond\n");
        return false;
    }

    return true;
}

void sim_kernel_shutdown(void)
{
    SDL_DestroyMutex(sim_irq_mtx);
    SDL_DestroyCond(sim_thread_cond);
}

volatile long current_tick = 0;
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct event_queue *all_queues[MAX_NUM_QUEUES];
static int num_queues = 0;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    /* the wakeup must have vacated the sender slot */
    if(*sender != NULL)
    {
        fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
        exit(-1);
    }
}

/* Releases any waiting threads that are queued with queue_send -
   reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    q->send = NULL;
    if(send)
    {
        q->send = send;
        memset(send, 0, sizeof(*send));
    }
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

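/* Usage sketch (illustrative, not part of the original file): the queue owner
 * allocates the sender list statically and enables synchronous sends once,
 * right after initialising the queue. The identifiers example_queue and
 * example_senders are hypothetical.
 */
#if 0
static struct event_queue example_queue;
static struct queue_sender_list example_senders;

static void example_queue_setup(void)
{
    queue_init(&example_queue, true);
    queue_enable_queue_send(&example_queue, &example_senders);
}
#endif
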
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    q->read = 0;
    q->write = 0;
    thread_queue_init(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(num_queues >= MAX_NUM_QUEUES)
        {
            fprintf(stderr, "queue_init->out of queues");
            exit(-1);
        }
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
    }

    set_irq_level(oldlevel);
}

void queue_delete(struct event_queue *q)
{
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    {
        if(all_queues[i] == q)
        {
            found = true;
            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}

void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
            cores[CURRENT_CORE].irq_level = oldlevel;
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        }
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    set_irq_level(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
        cores[CURRENT_CORE].irq_level = oldlevel;
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    set_irq_level(oldlevel);
}

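/* Usage sketch (illustrative, not part of the original file): a typical
 * thread event loop built on queue_wait_w_tmo(), doing periodic work when
 * SYS_TIMEOUT is returned. example_queue and the handling shown are
 * hypothetical.
 */
#if 0
static void example_thread_loop(void)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait_w_tmo(&example_queue, &ev, HZ);

        switch(ev.id)
        {
        case SYS_TIMEOUT:
            /* no message within one second - do periodic housekeeping */
            break;
        default:
            /* handle ev.id / ev.data posted by queue_post or queue_send */
            break;
        }
    }
}
#endif
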
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread(&q->queue);
    set_irq_level(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->queue);

        cores[CURRENT_CORE].irq_level = oldlevel;
        block_thread_no_listlock(spp);
        return thread_get_current()->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);
    set_irq_level(oldlevel);
    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

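/* Usage sketch (illustrative, not part of the original file): a sender blocks
 * in queue_send() until the receiving thread dequeues the message and answers
 * with queue_reply(); without an explicit reply, the receiver's next
 * queue_wait()/queue_wait_w_tmo() auto-replies with 0. The queue name and
 * EXAMPLE_REQUEST id are hypothetical.
 */
#if 0
static void example_sender(void)
{
    /* blocks until the receiver replies; returns the reply value */
    intptr_t result = queue_send(&example_queue, EXAMPLE_REQUEST, 0);
    (void)result;
}

static void example_receiver(void)
{
    struct queue_event ev;

    queue_wait(&example_queue, &ev);
    if(ev.id == EXAMPLE_REQUEST)
        queue_reply(&example_queue, 1);  /* wakes the blocked sender */
}
#endif
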
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);
#endif
    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    for(i = 0;i < num_queues;i++)
    {
        queue_post(all_queues[i], id, data);
    }

    set_irq_level(oldlevel);
    return num_queues;
}

void yield(void)
{
    switch_thread(NULL);
}

void sleep(int ticks)
{
    sleep_thread(ticks);
}

void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}

int tick_add_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    fprintf(stderr, "Error! tick_add_task(): out of tasks");
    exit(-1);
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}

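/* Usage sketch (illustrative, not part of the original file): a registered
 * tick task is called from sim_tick_tasks() in simulated interrupt context,
 * so it should be short and must not block. example_tick_task is
 * hypothetical.
 */
#if 0
static void example_tick_task(void)
{
    /* short, non-blocking periodic work, run once per kernel tick */
}

static void example_register_tick_task(void)
{
    tick_add_task(example_tick_task);
    /* ... later, when no longer needed ... */
    tick_remove_task(example_tick_task);
}
#endif
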
/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
}

void mutex_lock(struct mutex *m)
{
    struct thread_entry *const thread = thread_get_current();

    if(thread == m->thread)
    {
        m->count++;
        return;
    }

    if (!test_and_set(&m->locked, 1))
    {
        m->thread = thread;
        return;
    }

    block_thread_no_listlock(&m->queue);
}

void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    if(m->thread != thread_get_current())
    {
        fprintf(stderr, "mutex_unlock->wrong thread");
        exit(-1);
    }

    if (m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    m->thread = wakeup_thread_no_listlock(&m->queue);

    if (m->thread == NULL)
    {
        /* release lock */
        m->locked = 0;
    }
}

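/* Usage sketch (illustrative, not part of the original file): the mutex is
 * recursive - the owning thread may lock it again, and must unlock once per
 * lock. example_mtx and the "shared data" are hypothetical.
 */
#if 0
static struct mutex example_mtx;  /* mutex_init(&example_mtx) at startup */

static void example_critical_section(void)
{
    mutex_lock(&example_mtx);
    /* access data shared with other threads */
    mutex_unlock(&example_mtx);
}
#endif
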
void spinlock_init(struct spinlock *l)
{
    l->locked = 0;
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    struct thread_entry *const thread = thread_get_current();

    if (l->thread == thread)
    {
        l->count++;
        return;
    }

    while(test_and_set(&l->locked, 1))
    {
        switch_thread(NULL);
    }

    l->thread = thread;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    if(l->thread != thread_get_current())
    {
        fprintf(stderr, "spinlock_unlock->wrong thread");
        exit(-1);
    }

    if (l->count > 0)
    {
        /* this thread still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;
    l->locked = 0;
}

#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    if(max <= 0 || start < 0 || start > max)
    {
        fprintf(stderr, "semaphore_init->inv arg");
        exit(-1);
    }
    s->queue = NULL;
    s->max = max;
    s->count = start;
}

void semaphore_wait(struct semaphore *s)
{
    if(--s->count >= 0)
        return;
    /* out of count - block until a release wakes us */
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
    if(s->count < s->max)
    {
        if(++s->count <= 0)
        {
            if(s->queue == NULL)
            {
                /* there should be threads in this queue */
                fprintf(stderr, "semaphore->wakeup");
                exit(-1);
            }
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }
}
#endif /* HAVE_SEMAPHORE_OBJECTS */

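/* Usage sketch (illustrative, not part of the original file): a semaphore
 * initialised with a start count of 0 lets one context signal completion to
 * another. example_sem and both helper functions are hypothetical.
 */
#if 0
static struct semaphore example_sem;  /* semaphore_init(&example_sem, 1, 0) */

static void example_waiter(void)
{
    semaphore_wait(&example_sem);     /* blocks until released */
}

static void example_completion(void)
{
    semaphore_release(&example_sem);  /* wakes one waiting thread */
}
#endif
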
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
}

void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state = e->state;

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
        return;
    }

    /* current state does not match wait-for state */
    block_thread_no_listlock(&e->queues[for_state]);
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state = e->state;

    if(last_state == state)
    {
        /* no change */
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;

            if(e->queues[STATE_NONSIGNALED] != NULL)
            {
                /* no thread should have ever blocked for nonsignaled */
                fprintf(stderr, "set_event_state->queue[NS]:S");
                exit(-1);
            }

            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */
        if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
        {
            /* no thread should have ever blocked */
            fprintf(stderr, "set_event_state->queue[NS]:NS");
            exit(-1);
        }

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }
}
#endif /* HAVE_EVENT_OBJECTS */
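
/* Usage sketch (illustrative, not part of the original file): an automatic
 * event behaves like an auto-reset event - each
 * event_set_state(..., STATE_SIGNALED) wakes at most one waiter, and if a
 * waiter was woken the event returns to nonsignaled ("pulse"); otherwise it
 * stays signaled until the next wait. example_evt and the helper names are
 * hypothetical.
 */
#if 0
static struct event example_evt;  /* event_init(&example_evt, EVENT_AUTOMATIC) */

static void example_consumer(void)
{
    event_wait(&example_evt, STATE_SIGNALED);       /* wait for one "pulse" */
}

static void example_producer(void)
{
    event_set_state(&example_evt, STATE_SIGNALED);  /* wake one consumer */
}
#endif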