/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <SDL_thread.h>
#include "system-sdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
/* Keeps threads from running concurrently with the simulated "IRQ handler"
 * and keeps more than one handler from running at a time */
static SDL_cond *sim_thread_cond;
/* Protects the simulated IRQ state while it is being changed */
static SDL_mutex *sim_irq_mtx;
static int interrupt_level = HIGHEST_IRQ_LEVEL;
static int status_reg = 0;

extern struct core_entry cores[NUM_CORES];
/* Rules for the simulated "interrupt" system:
 * 1) All threads must pass unblocked
 * 2) The current handler must always pass unblocked
 * 3) Threads must be excluded while an IRQ routine is running
 * 4) No more than one handler routine should execute at a time */
int set_irq_level(int level)
{
    SDL_LockMutex(sim_irq_mtx);

    int oldlevel = interrupt_level;

    if (status_reg == 0 && level == 0 && oldlevel != 0)
    {
        /* Not in a handler and "interrupts" are being reenabled */
        SDL_CondSignal(sim_thread_cond);
    }

    interrupt_level = level; /* save new level */

    SDL_UnlockMutex(sim_irq_mtx);
    return oldlevel;
}
void sim_enter_irq_handler(void)
{
    SDL_LockMutex(sim_irq_mtx);
    if(interrupt_level != 0)
    {
        /* "Interrupts" are disabled. Wait for reenable */
        SDL_CondWait(sim_thread_cond, sim_irq_mtx);
    }
    status_reg = 1;
}

void sim_exit_irq_handler(void)
{
    status_reg = 0;
    SDL_UnlockMutex(sim_irq_mtx);
}
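
/* Usage sketch (illustrative only): code that must not be interleaved with a
 * simulated "interrupt" handler masks interrupts around its critical section,
 * just as it would on target hardware. shared_counter is a hypothetical
 * variable used only in this sketch:
 *
 *     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
 *     shared_counter++;              // state also touched by a handler
 *     set_irq_level(oldlevel);
 *
 * A simulated handler itself brackets its work with sim_enter_irq_handler()
 * and sim_exit_irq_handler(), which is how rules 2)-4) above are enforced. */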
bool sim_kernel_init(void)
{
    sim_irq_mtx = SDL_CreateMutex();
    if (sim_irq_mtx == NULL)
    {
        fprintf(stderr, "Cannot create sim_irq_mtx\n");
        return false;
    }

    /* Threads block on this condition while "interrupts" are disabled */
    sim_thread_cond = SDL_CreateCond();
    if (sim_thread_cond == NULL)
    {
        fprintf(stderr, "Cannot create sim_thread_cond\n");
        return false;
    }

    return true;
}
void sim_kernel_shutdown(void)
{
    SDL_DestroyMutex(sim_irq_mtx);
    SDL_DestroyCond(sim_thread_cond);
}
volatile long current_tick = 0;
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct event_queue *all_queues[MAX_NUM_QUEUES];
static int num_queues = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves a waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}
/* Puts the specified return value in the waiting thread's return value
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    if(*sender != NULL)
    {
        fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
    }
}
/* Releases any waiting threads that are queued with queue_send -
   reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}
/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    q->send = send;
    memset(send, 0, sizeof(*send));

    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    q->read = 0;
    q->write = 0;
    thread_queue_init(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(num_queues >= MAX_NUM_QUEUES)
        {
            fprintf(stderr, "queue_init->out of queues");
            exit(-1);
        }
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
    }

    set_irq_level(oldlevel);
}
void queue_delete(struct event_queue *q)
{
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    {
        if(all_queues[i] == q)
        {
            found = true;
            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       awaiting one */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply to the last dequeued sender */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
            cores[CURRENT_CORE].irq_level = oldlevel;
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        }
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    set_irq_level(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply to the last dequeued sender */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
        cores[CURRENT_CORE].irq_level = oldlevel;
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    set_irq_level(oldlevel);
}
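
/* Usage sketch (illustrative): a thread's event loop that polls its queue
 * with a one-second timeout and treats SYS_TIMEOUT as "do periodic work".
 * my_queue and MY_EVENT are hypothetical names, not defined in this file:
 *
 *     struct queue_event ev;
 *     queue_wait_w_tmo(&my_queue, &ev, HZ);
 *     if (ev.id == SYS_TIMEOUT)
 *         do_periodic_work();
 *     else if (ev.id == MY_EVENT)
 *         handle(ev.data);
 */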
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread(&q->queue);

    set_irq_level(oldlevel);
}
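
/* Usage sketch (illustrative): queue_post() never blocks, unlike queue_send()
 * below, so it is the form suited to tick tasks and other "interrupt"-context
 * code. my_queue and MY_TICK_EVENT are hypothetical names:
 *
 *     static void my_tick_task(void)
 *     {
 *         queue_post(&my_queue, MY_TICK_EVENT, (intptr_t)current_tick);
 *     }
 */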
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->queue);

        cores[CURRENT_CORE].irq_level = oldlevel;
        block_thread_no_listlock(spp);
        return thread_get_current()->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);
    set_irq_level(oldlevel);
    return 0;
}
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* 0 */
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
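
/* Usage sketch (illustrative) of synchronous messaging: the queue owner
 * enables sending with a queue_sender_list it allocates; a client then blocks
 * in queue_send() until it is given a reply value. With queue_reply() compiled
 * out above, the reply is the automatic 0 issued when the owner next calls
 * queue_wait()/queue_wait_w_tmo(). my_queue, my_senders and MY_REQUEST are
 * hypothetical names:
 *
 *     // owner, at init time
 *     static struct queue_sender_list my_senders;
 *     queue_init(&my_queue, true);
 *     queue_enable_queue_send(&my_queue, &my_senders);
 *
 *     // client: returns once the owner has dequeued the message and replied
 *     intptr_t result = queue_send(&my_queue, MY_REQUEST, 0);
 */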
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}
void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       a dequeued sent message will be handled by the owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    for(i = 0;i < num_queues;i++)
    {
        queue_post(all_queues[i], id, data);
    }

    set_irq_level(oldlevel);
    return num_queues;
}
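
/* Usage sketch (illustrative): a broadcast reaches every queue registered via
 * queue_init(q, true), which is how system-wide events such as
 * SYS_USB_CONNECTED are distributed to all threads:
 *
 *     queue_broadcast(SYS_USB_CONNECTED, 0);
 */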
void sleep(int ticks)
{
    thread_sleep(ticks);
}
void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}
int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    fprintf(stderr, "Error! tick_add_task(): out of tasks");
    return -1;
}
int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}
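
/* Usage sketch (illustrative): a tick task runs once per simulated tick from
 * sim_tick_tasks(), i.e. in "interrupt" context, so it must not block.
 * my_counter and my_tick_task are hypothetical names:
 *
 *     static volatile long my_counter;
 *
 *     static void my_tick_task(void)
 *     {
 *         my_counter++;
 *     }
 *
 *     tick_add_task(my_tick_task);    // returns 0 on success, -1 if full
 */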
/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
}

void mutex_lock(struct mutex *m)
{
    struct thread_entry *const thread = thread_get_current();

    if(thread == m->thread)
    {
        /* current thread already owns it - recursive lock */
        m->count++;
        return;
    }

    if (!test_and_set(&m->locked, 1))
    {
        /* lock was free - take ownership */
        m->thread = thread;
        return;
    }

    /* lock is held by another thread - block until the unlocker wakes us */
    block_thread_no_listlock(&m->queue);
}

void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    if(m->thread != thread_get_current())
    {
        fprintf(stderr, "mutex_unlock->wrong thread");
        exit(-1);
    }

    if (m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    /* pass ownership to the next waiter, if any */
    m->thread = wakeup_thread_no_listlock(&m->queue);

    if (m->thread == NULL)
    {
        /* no waiters - release the lock */
        m->locked = 0;
    }
}
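
/* Usage sketch (illustrative): guarding shared state between cooperating
 * threads. my_mutex and shared_list are hypothetical names:
 *
 *     static struct mutex my_mutex;
 *     mutex_init(&my_mutex);
 *
 *     mutex_lock(&my_mutex);
 *     // ... modify shared_list ...
 *     mutex_unlock(&my_mutex);
 */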
void spinlock_init(struct spinlock *l)
{
    l->locked = 0;
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    struct thread_entry *const thread = thread_get_current();

    if (l->thread == thread)
    {
        /* current thread already owns it - recursive lock */
        l->count++;
        return;
    }

    while(test_and_set(&l->locked, 1))
    {
        /* lock is taken - yield so the holder can run and release it */
        yield();
    }

    l->thread = thread;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    if(l->thread != thread_get_current())
    {
        fprintf(stderr, "spinlock_unlock->wrong thread");
        exit(-1);
    }

    if (l->count > 0)
    {
        /* this thread still owns lock */
        l->count--;
        return;
    }

    l->thread = NULL;
    l->locked = 0;
}
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    if(max <= 0 || start < 0 || start > max)
    {
        fprintf(stderr, "semaphore_init->inv arg");
        exit(-1);
    }
    s->queue = NULL;
    s->max = max;
    s->count = start;
}

void semaphore_wait(struct semaphore *s)
{
    if(--s->count >= 0)
        return;
    /* count went negative - block until a release wakes us */
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
    if(s->count < s->max)
    {
        if(++s->count <= 0)
        {
            if(s->queue == NULL)
            {
                /* there should be threads in this queue */
                fprintf(stderr, "semaphore->wakeup");
                exit(-1);
            }
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
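
/* Usage sketch (illustrative): a counting semaphore handing buffers from a
 * producer to a consumer. data_sem is a hypothetical name:
 *
 *     static struct semaphore data_sem;
 *     semaphore_init(&data_sem, 8, 0);   // at most 8 pending, none yet
 *
 *     // producer
 *     fill_buffer();
 *     semaphore_release(&data_sem);
 *
 *     // consumer
 *     semaphore_wait(&data_sem);
 *     drain_buffer();
 */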
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
}

void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state = e->state;

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
        return;
    }

    /* current state does not match wait-for state */
    block_thread_no_listlock(&e->queues[for_state]);
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state = e->state;

    if(last_state == state)
    {
        /* no change */
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;

            if(e->queues[STATE_NONSIGNALED] != NULL)
            {
                /* no thread should have ever blocked for nonsignaled */
                fprintf(stderr, "set_event_state->queue[NS]:S");
                exit(-1);
            }

            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */
        if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
        {
            /* no thread should have ever blocked */
            fprintf(stderr, "set_event_state->queue[NS]:NS");
            exit(-1);
        }

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }
}
#endif /* HAVE_EVENT_OBJECTS */
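
/* Usage sketch (illustrative): a manual-reset event used as a "data ready"
 * flag between two threads. data_ready is a hypothetical name:
 *
 *     static struct event data_ready;
 *     event_init(&data_ready, STATE_NONSIGNALED);
 *
 *     // consumer: blocks until the event becomes signaled
 *     event_wait(&data_ready, STATE_SIGNALED);
 *
 *     // producer: wakes every waiter and leaves the event signaled
 *     event_set_state(&data_ready, STATE_SIGNALED);
 */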