1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Felix Arends
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <SDL_thread.h>

#include "system-sdl.h"
#include "thread-sdl.h"
31 /* Condition to signal that "interrupts" may proceed */
32 static SDL_cond
*sim_thread_cond
;
33 /* Mutex to serialize changing levels and exclude other threads while
35 static SDL_mutex
*sim_irq_mtx
;
36 static int interrupt_level
= HIGHEST_IRQ_LEVEL
;
37 static int handlers_pending
= 0;
38 static int status_reg
= 0;
40 extern struct core_entry cores
[NUM_CORES
];
43 * 1) All threads must pass unblocked
44 * 2) Current handler must always pass unblocked
45 * 3) Threads must be excluded when irq routine is running
46 * 4) No more than one handler routine should execute at a time
48 int set_irq_level(int level
)
50 SDL_LockMutex(sim_irq_mtx
);
52 int oldlevel
= interrupt_level
;
54 if (status_reg
== 0 && level
== 0 && oldlevel
!= 0)
56 /* Not in a handler and "interrupts" are being reenabled */
57 if (handlers_pending
> 0)
58 SDL_CondSignal(sim_thread_cond
);
61 interrupt_level
= level
; /* save new level */
63 SDL_UnlockMutex(sim_irq_mtx
);
67 void sim_enter_irq_handler(void)
69 SDL_LockMutex(sim_irq_mtx
);
72 if(interrupt_level
!= 0)
74 /* "Interrupts" are disabled. Wait for reenable */
75 SDL_CondWait(sim_thread_cond
, sim_irq_mtx
);
81 void sim_exit_irq_handler(void)
83 if (--handlers_pending
> 0)
84 SDL_CondSignal(sim_thread_cond
);
87 SDL_UnlockMutex(sim_irq_mtx
);
90 bool sim_kernel_init(void)
92 sim_irq_mtx
= SDL_CreateMutex();
93 if (sim_irq_mtx
== NULL
)
95 fprintf(stderr
, "Cannot create sim_handler_mtx\n");
99 sim_thread_cond
= SDL_CreateCond();
100 if (sim_thread_cond
== NULL
)
102 fprintf(stderr
, "Cannot create sim_thread_cond\n");
109 void sim_kernel_shutdown(void)
111 SDL_DestroyMutex(sim_irq_mtx
);
112 SDL_DestroyCond(sim_thread_cond
);
115 volatile long current_tick
= 0;
116 static void (*tick_funcs
[MAX_NUM_TICK_TASKS
])(void);
118 /* This array holds all queues that are initiated. It is used for broadcast. */
119 static struct event_queue
*all_queues
[MAX_NUM_QUEUES
];
120 static int num_queues
= 0;
122 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
123 /* Moves waiting thread's descriptor to the current sender when a
124 message is dequeued */
125 static void queue_fetch_sender(struct queue_sender_list
*send
,
128 struct thread_entry
**spp
= &send
->senders
[i
];
132 send
->curr_sender
= *spp
;
137 /* Puts the specified return value in the waiting thread's return value
138 and wakes the thread - a sender should be confirmed to exist first */
139 static void queue_release_sender(struct thread_entry
**sender
,
142 (*sender
)->retval
= retval
;
143 wakeup_thread_no_listlock(sender
);
146 fprintf(stderr
, "queue->send slot ovf: %p\n", *sender
);
151 /* Releases any waiting threads that are queued with queue_send -
153 static void queue_release_all_senders(struct event_queue
*q
)
158 for(i
= q
->read
; i
!= q
->write
; i
++)
160 struct thread_entry
**spp
=
161 &q
->send
->senders
[i
& QUEUE_LENGTH_MASK
];
164 queue_release_sender(spp
, 0);
170 /* Enables queue_send on the specified queue - caller allocates the extra
172 void queue_enable_queue_send(struct event_queue
*q
,
173 struct queue_sender_list
*send
)
175 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
180 memset(send
, 0, sizeof(*send
));
182 set_irq_level(oldlevel
);
184 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
186 void queue_init(struct event_queue
*q
, bool register_queue
)
188 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
192 thread_queue_init(&q
->queue
);
193 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
194 q
->send
= NULL
; /* No message sending by default */
199 if(num_queues
>= MAX_NUM_QUEUES
)
201 fprintf(stderr
, "queue_init->out of queues");
204 /* Add it to the all_queues array */
205 all_queues
[num_queues
++] = q
;
208 set_irq_level(oldlevel
);
211 void queue_delete(struct event_queue
*q
)
216 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
218 /* Find the queue to be deleted */
219 for(i
= 0;i
< num_queues
;i
++)
221 if(all_queues
[i
] == q
)
230 /* Move the following queues up in the list */
231 for(;i
< num_queues
-1;i
++)
233 all_queues
[i
] = all_queues
[i
+1];
239 /* Release threads waiting on queue head */
240 thread_queue_wake(&q
->queue
);
242 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
243 /* Release waiting threads and reply to any dequeued message
245 queue_release_all_senders(q
);
252 set_irq_level(oldlevel
);
255 void queue_wait(struct event_queue
*q
, struct queue_event
*ev
)
258 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
260 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
261 if (q
->send
&& q
->send
->curr_sender
)
264 queue_release_sender(&q
->send
->curr_sender
, 0);
268 if (q
->read
== q
->write
)
272 block_thread(&q
->queue
);
273 oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
275 while (q
->read
== q
->write
);
278 rd
= q
->read
++ & QUEUE_LENGTH_MASK
;
281 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
282 if(q
->send
&& q
->send
->senders
[rd
])
284 /* Get data for a waiting thread if one */
285 queue_fetch_sender(q
->send
, rd
);
289 set_irq_level(oldlevel
);
292 void queue_wait_w_tmo(struct event_queue
*q
, struct queue_event
*ev
, int ticks
)
294 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
296 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
297 if (q
->send
&& q
->send
->curr_sender
)
300 queue_release_sender(&q
->send
->curr_sender
, 0);
304 if (q
->read
== q
->write
&& ticks
> 0)
306 block_thread_w_tmo(&q
->queue
, ticks
);
307 oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
310 if(q
->read
!= q
->write
)
312 unsigned int rd
= q
->read
++ & QUEUE_LENGTH_MASK
;
315 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
316 if(q
->send
&& q
->send
->senders
[rd
])
318 /* Get data for a waiting thread if one */
319 queue_fetch_sender(q
->send
, rd
);
325 ev
->id
= SYS_TIMEOUT
;
328 set_irq_level(oldlevel
);
331 void queue_post(struct event_queue
*q
, long id
, intptr_t data
)
333 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
335 unsigned int wr
= q
->write
++ & QUEUE_LENGTH_MASK
;
337 q
->events
[wr
].id
= id
;
338 q
->events
[wr
].data
= data
;
340 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
343 struct thread_entry
**spp
= &q
->send
->senders
[wr
];
347 /* overflow protect - unblock any thread waiting at this index */
348 queue_release_sender(spp
, 0);
353 wakeup_thread(&q
->queue
);
355 set_irq_level(oldlevel
);
358 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
359 intptr_t queue_send(struct event_queue
*q
, long id
, intptr_t data
)
361 int oldlevel
= set_irq_level(oldlevel
);
363 unsigned int wr
= q
->write
++ & QUEUE_LENGTH_MASK
;
365 q
->events
[wr
].id
= id
;
366 q
->events
[wr
].data
= data
;
370 struct thread_entry
**spp
= &q
->send
->senders
[wr
];
374 /* overflow protect - unblock any thread waiting at this index */
375 queue_release_sender(spp
, 0);
378 wakeup_thread(&q
->queue
);
380 block_thread_no_listlock(spp
);
381 return thread_get_current()->retval
;
384 /* Function as queue_post if sending is not enabled */
385 wakeup_thread(&q
->queue
);
386 set_irq_level(oldlevel
);
390 #if 0 /* not used now but probably will be later */
391 /* Query if the last message dequeued was added by queue_send or not */
392 bool queue_in_queue_send(struct event_queue
*q
)
394 return q
->send
&& q
->send
->curr_sender
;
398 /* Replies with retval to any dequeued message sent with queue_send */
399 void queue_reply(struct event_queue
*q
, intptr_t retval
)
401 if(q
->send
&& q
->send
->curr_sender
)
403 queue_release_sender(&q
->send
->curr_sender
, retval
);
406 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
408 bool queue_empty(const struct event_queue
* q
)
410 return ( q
->read
== q
->write
);
413 bool queue_peek(struct event_queue
*q
, struct queue_event
*ev
)
415 if (q
->read
== q
->write
)
418 bool have_msg
= false;
420 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
422 if (q
->read
!= q
->write
)
424 *ev
= q
->events
[q
->read
& QUEUE_LENGTH_MASK
];
428 set_irq_level(oldlevel
);
433 void queue_clear(struct event_queue
* q
)
435 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
437 /* fixme: This is potentially unsafe in case we do interrupt-like processing */
438 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
439 /* Release all thread waiting in the queue for a reply -
440 dequeued sent message will be handled by owning thread */
441 queue_release_all_senders(q
);
446 set_irq_level(oldlevel
);
449 void queue_remove_from_head(struct event_queue
*q
, long id
)
451 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
453 while(q
->read
!= q
->write
)
455 unsigned int rd
= q
->read
& QUEUE_LENGTH_MASK
;
457 if(q
->events
[rd
].id
!= id
)
462 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
465 struct thread_entry
**spp
= &q
->send
->senders
[rd
];
469 /* Release any thread waiting on this message */
470 queue_release_sender(spp
, 0);
477 set_irq_level(oldlevel
);
480 int queue_count(const struct event_queue
*q
)
482 return q
->write
- q
->read
;
485 int queue_broadcast(long id
, intptr_t data
)
487 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
490 for(i
= 0;i
< num_queues
;i
++)
492 queue_post(all_queues
[i
], id
, data
);
495 set_irq_level(oldlevel
);
504 void sleep(int ticks
)
509 void sim_tick_tasks(void)
513 /* Run through the list of tick tasks */
514 for(i
= 0;i
< MAX_NUM_TICK_TASKS
;i
++)
523 int tick_add_task(void (*f
)(void))
525 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
528 /* Add a task if there is room */
529 for(i
= 0;i
< MAX_NUM_TICK_TASKS
;i
++)
531 if(tick_funcs
[i
] == NULL
)
534 set_irq_level(oldlevel
);
538 fprintf(stderr
, "Error! tick_add_task(): out of tasks");
543 int tick_remove_task(void (*f
)(void))
545 int oldlevel
= set_irq_level(HIGHEST_IRQ_LEVEL
);
548 /* Remove a task if it is there */
549 for(i
= 0;i
< MAX_NUM_TICK_TASKS
;i
++)
551 if(tick_funcs
[i
] == f
)
553 tick_funcs
[i
] = NULL
;
554 set_irq_level(oldlevel
);
559 set_irq_level(oldlevel
);
563 /* Very simple mutex simulation - won't work with pre-emptive
564 multitasking, but is better than nothing at all */
565 void mutex_init(struct mutex
*m
)
573 void mutex_lock(struct mutex
*m
)
575 struct thread_entry
*const thread
= thread_get_current();
577 if(thread
== m
->thread
)
583 if (!test_and_set(&m
->locked
, 1))
589 block_thread_no_listlock(&m
->queue
);
592 void mutex_unlock(struct mutex
*m
)
594 /* unlocker not being the owner is an unlocking violation */
595 if(m
->thread
!= thread_get_current())
597 fprintf(stderr
, "mutex_unlock->wrong thread");
603 /* this thread still owns lock */
608 m
->thread
= wakeup_thread_no_listlock(&m
->queue
);
610 if (m
->thread
== NULL
)
617 #ifdef HAVE_SEMAPHORE_OBJECTS
618 void semaphore_init(struct semaphore
*s
, int max
, int start
)
620 if(max
<= 0 || start
< 0 || start
> max
)
622 fprintf(stderr
, "semaphore_init->inv arg");
630 void semaphore_wait(struct semaphore
*s
)
634 block_thread_no_listlock(&s
->queue
);
637 void semaphore_release(struct semaphore
*s
)
639 if(s
->count
< s
->max
)
645 /* there should be threads in this queue */
646 fprintf(stderr
, "semaphore->wakeup");
649 /* a thread was queued - wake it up */
650 wakeup_thread_no_listlock(&s
->queue
);
654 #endif /* HAVE_SEMAPHORE_OBJECTS */
656 #ifdef HAVE_EVENT_OBJECTS
657 void event_init(struct event
*e
, unsigned int flags
)
659 e
->queues
[STATE_NONSIGNALED
] = NULL
;
660 e
->queues
[STATE_SIGNALED
] = NULL
;
661 e
->state
= flags
& STATE_SIGNALED
;
662 e
->automatic
= (flags
& EVENT_AUTOMATIC
) ? 1 : 0;
665 void event_wait(struct event
*e
, unsigned int for_state
)
667 unsigned int last_state
= e
->state
;
669 if(e
->automatic
!= 0)
671 /* wait for false always satisfied by definition
672 or if it just changed to false */
673 if(last_state
== STATE_SIGNALED
|| for_state
== STATE_NONSIGNALED
)
675 /* automatic - unsignal */
676 e
->state
= STATE_NONSIGNALED
;
679 /* block until state matches */
681 else if(for_state
== last_state
)
683 /* the state being waited for is the current state */
687 /* current state does not match wait-for state */
688 block_thread_no_listlock(&e
->queues
[for_state
]);
691 void event_set_state(struct event
*e
, unsigned int state
)
693 unsigned int last_state
= e
->state
;
695 if(last_state
== state
)
701 if(state
== STATE_SIGNALED
)
703 if(e
->automatic
!= 0)
705 struct thread_entry
*thread
;
707 if(e
->queues
[STATE_NONSIGNALED
] != NULL
)
709 /* no thread should have ever blocked for nonsignaled */
710 fprintf(stderr
, "set_event_state->queue[NS]:S");
714 /* pass to next thread and keep unsignaled - "pulse" */
715 thread
= wakeup_thread_no_listlock(&e
->queues
[STATE_SIGNALED
]);
716 e
->state
= thread
!= NULL
? STATE_NONSIGNALED
: STATE_SIGNALED
;
720 /* release all threads waiting for signaled */
721 thread_queue_wake_no_listlock(&e
->queues
[STATE_SIGNALED
]);
722 e
->state
= STATE_SIGNALED
;
727 /* release all threads waiting for unsignaled */
728 if(e
->queues
[STATE_NONSIGNALED
] != NULL
&& e
->automatic
!= 0)
730 /* no thread should have ever blocked */
731 fprintf(stderr
, "set_event_state->queue[NS]:NS");
735 thread_queue_wake_no_listlock(&e
->queues
[STATE_NONSIGNALED
]);
736 e
->state
= STATE_NONSIGNALED
;
739 #endif /* HAVE_EVENT_OBJECTS */