/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#if CONFIG_CPU == IMX31L
#include "avic-imx31.h"
#endif

#if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct event_queue *all_queues[32] NOCACHEBSS_ATTR;
static int num_queues NOCACHEBSS_ATTR;

void queue_wait(struct event_queue *q, struct event *ev) ICODE_ATTR;

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    if(CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));

        num_queues = 0;
        memset(all_queues, 0, sizeof(all_queues));
    }

    tick_start(1000/HZ);
}

void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21; // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20); // start timer 4
    do {
       counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread(true,NULL);
#else
    sleep_thread(ticks);
#endif
}

void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread(true, NULL);
#endif
}

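/* Usage sketch (illustrative, not part of the original file): a thread that
 * polls a hypothetical device_ready() flag while giving up the CPU between
 * checks.  sleep() blocks the calling thread for the given number of ticks;
 * yield() only lets other ready threads run without imposing a delay. */
#if 0
static void example_wait_for_device(void)
{
    while (!device_ready())     /* device_ready() is hypothetical */
        sleep(HZ/10);           /* roughly 100 ms between polls */

    yield();                    /* let other threads run, no delay */
}
#endif
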
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if (*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 * 1) A sender should be confirmed to exist before calling; this makes it
 *    more efficient to reject the majority of cases that don't need this
 *    call.
 * 2) Requires interrupts disabled since queue overflows can cause posts
 *    from interrupt handlers to wake threads. Not doing so could cause
 *    an attempt at multiple wakes or other problems.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_irq_safe(sender);
#if 0
    /* This should _never_ happen - there must never be multiple
       threads in this list and it is a corrupt state */
    if (*sender != NULL)
        panicf("Queue: send slot ovf");
#endif
}

/* Releases any waiting threads that are queued with queue_send -
 * replies with 0.
 * Disable IRQs before calling since it uses queue_release_sender.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    q->send = send;
    memset(send, 0, sizeof(struct queue_sender_list));
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

void queue_init(struct event_queue *q, bool register_queue)
{
    q->read = 0;
    q->write = 0;
    q->thread = NULL;
#if NUM_CORES > 1
    q->irq_safe = false;
#endif
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
    }
}

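/* Usage sketch (illustrative, not part of the original file): a minimal
 * event loop built on the queue API.  EV_DO_WORK is a hypothetical event id;
 * real code defines its own ids and usually registers the queue so that
 * queue_broadcast() reaches it. */
#if 0
static struct event_queue example_q;

static void example_thread(void)
{
    struct event ev;

    queue_init(&example_q, true);       /* register for broadcasts */

    while (1)
    {
        queue_wait(&example_q, &ev);    /* blocks until an event arrives */

        switch (ev.id)
        {
        case EV_DO_WORK:                /* hypothetical event id */
            /* ... handle ev.data ... */
            break;
        }
    }
}

/* Another thread posts events with:
 *     queue_post(&example_q, EV_DO_WORK, 42);
 */
#endif
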
#if NUM_CORES > 1
/**
 * If IRQ mode is enabled, some core-wise locking mechanisms are disabled
 * causing access to the queue to no longer be thread safe from the other
 * core.  However, that locking mechanism would also kill IRQ handlers.
 *
 * @param q struct of an event_queue
 * @param state enable/disable IRQ mode
 * @default state disabled
 */
void queue_set_irq_safe(struct event_queue *q, bool state)
{
    q->irq_safe = state;
}
#endif

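/* Usage sketch (illustrative, not part of the original file): marking a queue
 * as IRQ safe so it can be accessed from an interrupt handler on this core,
 * at the cost of cross-core thread safety (see the comment above).  Note that
 * queue_set_irq_safe() only exists on multi-core builds (NUM_CORES > 1). */
#if 0
static struct event_queue example_irq_q;

static void example_irq_queue_setup(void)
{
    queue_init(&example_irq_q, true);
    queue_set_irq_safe(&example_irq_q, true); /* skip core locks on access */
}
#endif
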
void queue_delete(struct event_queue *q)
{
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    lock_cores();

    /* Release threads waiting on the queue */
    wakeup_thread(&q->thread);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    {
        if(all_queues[i] == q)
        {
            found = true;
            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    unlock_cores();
    set_irq_level(oldlevel);
}

void queue_wait(struct event_queue *q, struct event *ev)
{
    int oldlevel;
    unsigned int rd;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    lock_cores();

    if (q->read == q->write)
    {
        set_irq_level_and_block_thread(&q->thread, oldlevel);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        lock_cores();
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread, if there is one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    unlock_cores();
    set_irq_level(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    lock_cores();

    if (q->read == q->write && ticks > 0)
    {
        set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        lock_cores();
    }

    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread, if there is one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    unlock_cores();
    set_irq_level(oldlevel);
}

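/* Usage sketch (illustrative, not part of the original file): waiting with a
 * timeout and distinguishing a real event from SYS_TIMEOUT, which is what
 * ev.id is set to when nothing arrived within 'ticks'. */
#if 0
static void example_wait_with_timeout(struct event_queue *q)
{
    struct event ev;

    queue_wait_w_tmo(q, &ev, HZ);   /* wait at most about one second */

    if (ev.id == SYS_TIMEOUT)
    {
        /* nothing arrived - do periodic housekeeping instead */
    }
    else
    {
        /* handle ev.id / ev.data as usual */
    }
}
#endif
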
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    unsigned int wr;

#if NUM_CORES > 1
    if (!q->irq_safe)
        lock_cores();
#endif

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if (*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread_irq_safe(&q->thread);
#if NUM_CORES > 1
    if (!q->irq_safe)
        unlock_cores();
#endif
    set_irq_level(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed
   to use this function - we only aim to protect the queue integrity by
   turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    unsigned int wr;

    lock_cores();

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if (*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->thread);
        set_irq_level_and_block_thread(spp, oldlevel);
        return thread_get_current()->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->thread);
    unlock_cores();
    set_irq_level(oldlevel);

    return 0;
}

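/* Usage sketch (illustrative, not part of the original file): synchronous
 * messaging.  The sending thread blocks in queue_send() until the receiving
 * thread answers with queue_reply(); the reply value becomes queue_send()'s
 * return value.  EV_QUERY is a hypothetical event id, and the sender list
 * must have been attached with queue_enable_queue_send() beforehand. */
#if 0
static struct event_queue example_sq;
static struct queue_sender_list example_sl;

static void example_receiver_loop(void)
{
    struct event ev;

    queue_init(&example_sq, true);
    queue_enable_queue_send(&example_sq, &example_sl);

    while (1)
    {
        queue_wait(&example_sq, &ev);
        if (ev.id == EV_QUERY)              /* hypothetical id */
            queue_reply(&example_sq, 1);    /* unblocks the sender, retval 1 */
    }
}

/* Sender side, from another thread:
 *     intptr_t answer = queue_send(&example_sq, EV_QUERY, 0);
 */
#endif
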
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    lock_cores();
    /* No IRQ lock here since IRQs cannot change this */
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
    unlock_cores();
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

bool queue_empty(const struct event_queue* q)
{
    bool is_empty;

#if NUM_CORES > 1
    if (!q->irq_safe)
        lock_cores();
#endif

    is_empty = ( q->read == q->write );
#if NUM_CORES > 1
    if (!q->irq_safe)
        unlock_cores();
#endif

    return is_empty;
}

void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#if NUM_CORES > 1
    if (!q->irq_safe)
        lock_cores();
#endif

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

#if NUM_CORES > 1
    if (!q->irq_safe)
        unlock_cores();
#endif

    set_irq_level(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#if NUM_CORES > 1
    if (!q->irq_safe)
        lock_cores();
#endif

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if (*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

#if NUM_CORES > 1
    if (!q->irq_safe)
        unlock_cores();
#endif

    set_irq_level(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q struct of an event_queue
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int result;

#if NUM_CORES > 1
    if (!q->irq_safe)
        lock_cores();
#endif

    result = q->write - q->read;

#if NUM_CORES > 1
    if (!q->irq_safe)
        unlock_cores();
#endif

    set_irq_level(oldlevel);

    return result;
}

int queue_broadcast(long id, intptr_t data)
{
    int i;

    for(i = 0;i < num_queues;i++)
    {
        queue_post(all_queues[i], id, data);
    }

    return num_queues;
}

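/* Usage sketch (illustrative, not part of the original file): posting a
 * system event to every registered queue.  SYS_USB_CONNECTED is used purely
 * as an example of a system event id; the return value is the number of
 * queues the event was posted to. */
#if 0
static void example_broadcast(void)
{
    int queues_notified = queue_broadcast(SYS_USB_CONNECTED, 0);
    (void)queues_notified;
}
#endif
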
/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;   /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TSR0 &= ~0x01;
    TIER0 = 0xf9; /* Enable GRA match interrupt */

    TSTR |= 0x01; /* Start timer 0 */
}

void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TSR0 &= ~0x01;
}

#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0; /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
          /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff; /* Clear all events */

    ICR1 = 0x8c; /* Interrupt on level 3.0 */
    IMR &= ~0x200;
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TER0 = 0xff; /* Clear all events */
}

#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    TIMER1_VAL; /* Read value to ack IRQ */
    /* Run through the list of tick tasks (using main core) */
    if (CURRENT_CORE == CPU)
    {
        for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
        {
            if (tick_funcs[i])
            {
                tick_funcs[i]();
            }
        }

        current_tick++;
    }
}
#endif

void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    if(CURRENT_CORE == CPU)
    {
        TIMER1_CFG = 0x0;
        TIMER1_VAL;
        /* enable timer */
        TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
        /* unmask interrupt source */
        CPU_INT_EN = TIMER1_MASK;
    } else {
        COP_INT_EN = TIMER1_MASK;
    }
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}

#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TIMER0.clr = 0;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}

#elif CONFIG_CPU == IMX31L
void tick_start(unsigned int interval_in_ms)
{
    EPITCR1 &= ~0x1;    /* Disable the counter */

    EPITCR1 &= ~0xE;    /* Disable interrupt, count down from 0xFFFFFFFF */
    EPITCR1 &= ~0xFFF0; /* Clear prescaler */
#ifdef BOOTLOADER
    EPITCR1 |= (2700 << 2); /* Prescaler = 2700 */
#endif
    EPITCR1 &= ~(0x3 << 24);
    EPITCR1 |= (0x2 << 24); /* Set clock source to external clock (27 MHz) */
    EPITSR1 = 1;        /* Clear the interrupt request */
#ifndef BOOTLOADER
    EPITLR1 = 27000000 * interval_in_ms / 1000;
    EPITCMPR1 = 27000000 * interval_in_ms / 1000;
#else
    (void)interval_in_ms;
#endif

    //avic_enable_int(EPIT1, IRQ, EPIT_HANDLER);

    EPITCR1 |= 0x1;     /* Enable the counter */
}

#ifndef BOOTLOADER
void EPIT_HANDLER(void) __attribute__((interrupt("IRQ")));
void EPIT_HANDLER(void) {
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    EPITSR1 = 1; /* Clear the interrupt request */
}
#endif
#endif

int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }
    set_irq_level(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}

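/* Usage sketch (illustrative, not part of the original file): installing a
 * periodic tick task.  The callback runs from the timer interrupt every tick
 * (1000/HZ ms), so it must be short and must never block.  example_tick_cb
 * is hypothetical. */
#if 0
static void example_tick_cb(void)
{
    /* e.g. poll a GPIO or update a counter - keep it brief */
}

static void example_tick_usage(void)
{
    tick_add_task(example_tick_cb);     /* returns 0 on success */

    /* ... later, when the periodic work is no longer needed ... */
    tick_remove_task(example_tick_cb);
}
#endif
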
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    set_irq_level(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    set_irq_level(oldlevel);
}

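/* Usage sketch (illustrative, not part of the original file): a one-shot
 * timeout.  The callback runs from the tick task when the timeout expires;
 * returning false removes it, returning true re-arms it for another 'ticks'
 * interval (this assumes timeout_cb_type returns a boolean, as the check in
 * timeout_tick() above suggests).  example_to and example_to_cb are
 * hypothetical. */
#if 0
static struct timeout example_to;

static bool example_to_cb(struct timeout *tmo)
{
    /* tmo->data carries the value passed to timeout_register() */
    (void)tmo;
    return false;           /* one-shot: do not reschedule */
}

static void example_timeout_usage(void)
{
    timeout_register(&example_to, example_to_cb, HZ/2, 0); /* fire in ~0.5 s */
    /* timeout_cancel(&example_to) would remove it before it fires */
}
#endif
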
#endif /* INCLUDE_TIMEOUT_API */

#ifndef SIMULATOR
/*
 * Simulator versions in uisimulator/SIMVER/
 */

/****************************************************************************
 * Simple mutex functions
 ****************************************************************************/
void mutex_init(struct mutex *m)
{
    m->locked = false;
    m->thread = NULL;
}

void mutex_lock(struct mutex *m)
{
    if (test_and_set(&m->locked, 1))
    {
        /* Wait until the lock is open... */
        block_thread(&m->thread);
    }
}

void mutex_unlock(struct mutex *m)
{
    lock_cores();

    if (m->thread == NULL)
        m->locked = 0;
    else
        wakeup_thread(&m->thread);

    unlock_cores();
}

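/* Usage sketch (illustrative, not part of the original file): protecting a
 * shared resource with a mutex.  mutex_lock() blocks the calling thread until
 * the lock is free; spinlock_lock() (below) busy-waits with switch_thread()
 * instead and so only suits very short critical sections. */
#if 0
static struct mutex example_mtx;     /* mutex_init(&example_mtx) at startup */

static void example_critical_section(void)
{
    mutex_lock(&example_mtx);
    /* ... touch the shared state ... */
    mutex_unlock(&example_mtx);
}
#endif
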
void spinlock_lock(struct mutex *m)
{
    while (test_and_set(&m->locked, 1))
    {
        /* wait until the lock is open... */
        switch_thread(true, NULL);
    }
}

void spinlock_unlock(struct mutex *m)
{
    m->locked = 0;
}

#endif /* ndef SIMULATOR */