/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#if CONFIG_CPU == IMX31L
#include "avic-imx31.h"
#endif
#if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
/* This array holds all queues that have been initialized. It is used for
   broadcast. */
static struct event_queue *all_queues[32] NOCACHEBSS_ATTR;
static int num_queues NOCACHEBSS_ATTR;
/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
    /* Init the threading API */

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(all_queues, 0, sizeof(all_queues));
    }
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21;    // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20);  // start timer 4
#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);

    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread(true, NULL);
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread(true, NULL);
#endif
/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves a waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
    struct thread_entry **spp = &send->senders[i];

    send->curr_sender = *spp;
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 * 1) A sender should be confirmed to exist before calling, which makes it
 *    more efficient to reject the majority of cases that don't need this.
 * 2) Requires interrupts disabled, since queue overflows can cause posts
 *    from interrupt handlers to wake threads. Not doing so could cause
 *    an attempt at multiple wakes or other problems. */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
    (*sender)->retval = retval;
    wakeup_thread_irq_safe(sender);
    /* This should _never_ happen - there must never be multiple
       threads in this list; it indicates a corrupt state */
    panicf("Queue: send slot ovf");
/* Releases any waiting threads that are queued with queue_send,
 * replying with 0.
 * Disable IRQs before calling since it uses queue_release_sender. */
static void queue_release_all_senders(struct event_queue *q)
    for(i = q->read; i != q->write; i++)
    {
        struct thread_entry **spp =
            &q->send->senders[i & QUEUE_LENGTH_MASK];

        if(*spp)
        {
            queue_release_sender(spp, 0);
        }
    }
/* Enables queue_send on the specified queue - caller allocates the extra
   queue_sender_list structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
    memset(send, 0, sizeof(struct queue_sender_list));
    q->send = send;
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
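
/* Usage sketch (illustrative, hypothetical names): a thread that wants to
 * accept synchronous messages allocates a queue_sender_list next to its
 * queue and enables sending once during startup. */
#if 0 /* example only */
static struct event_queue ui_queue;
static struct queue_sender_list ui_queue_senders;

static void ui_queue_setup(void)
{
    queue_init(&ui_queue, true); /* register for queue_broadcast */
    queue_enable_queue_send(&ui_queue, &ui_queue_senders);
}
#endif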
void queue_init(struct event_queue *q, bool register_queue)
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    /* Add it to the all_queues array */
    all_queues[num_queues++] = q;
void queue_delete(struct event_queue *q)
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Release threads waiting on queue */
    wakeup_thread(&q->thread);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       awaiting one */
    queue_release_all_senders(q);
    /* Find the queue to be deleted */
    for(i = 0; i < num_queues; i++)
    {
        if(all_queues[i] == q)
        {
            /* Move the following queues up in the list */
            for(; i < num_queues - 1; i++)
                all_queues[i] = all_queues[i+1];

            num_queues--;
            break;
        }
    }

    set_irq_level(oldlevel);
void queue_wait(struct event_queue *q, struct event *ev)
    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->curr_sender)
        /* auto-reply to a dequeued sender with 0 */
        queue_release_sender(&q->send->curr_sender, 0);
    if (q->read == q->write)
    {
        set_irq_level_and_block_thread(&q->thread, oldlevel);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if there is one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    set_irq_level(oldlevel);
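
/* Usage sketch (hypothetical names): the typical blocking event loop built
 * on queue_wait. The event id and helper below are stand-ins. */
#if 0 /* example only */
static struct event_queue ui_queue;

static void ui_thread(void)
{
    struct event ev;

    while (1)
    {
        queue_wait(&ui_queue, &ev); /* sleeps until an event arrives */

        switch (ev.id)
        {
            case UI_REDRAW:      /* hypothetical event id */
                redraw_screen(); /* hypothetical helper */
                break;
            default:
                break;
        }
    }
}
#endif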
void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
        /* auto-reply to a dequeued sender with 0 */
        queue_release_sender(&q->send->curr_sender, 0);
    if (q->read == q->write && ticks > 0)
    {
        set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if there is one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    set_irq_level(oldlevel);
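
/* Usage sketch: with queue_wait_w_tmo a thread can interleave event
 * handling with periodic work; ev->id is SYS_TIMEOUT when nothing arrived
 * within `ticks`. Helper names are hypothetical; this fragment belongs
 * inside a thread's loop. */
#if 0 /* example only */
    struct event ev;

    queue_wait_w_tmo(&ui_queue, &ev, HZ/2); /* wait at most half a second */

    if (ev.id == SYS_TIMEOUT)
        do_periodic_work(); /* hypothetical */
    else
        handle_event(&ev);  /* hypothetical */
#endif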
void queue_post(struct event_queue *q, long id, intptr_t data)
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread_irq_safe(&q->thread);
    set_irq_level(oldlevel);
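
/* Usage sketch: queue_post is the fire-and-forget path; because it only
 * uses wakeup_thread_irq_safe it may be called from interrupt handlers.
 * The event id and helper below are hypothetical. */
#if 0 /* example only */
void button_irq(void) /* hypothetical ISR */
{
    queue_post(&ui_queue, UI_BUTTON, (intptr_t)read_button_state());
}
#endif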
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed
   to use this function - we only aim to protect the queue integrity by
   disabling interrupts. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->thread);
        set_irq_level_and_block_thread(spp, oldlevel);
        return thread_get_current()->retval;
    }
    /* Behave as queue_post if sending is not enabled */
    wakeup_thread(&q->thread);
    set_irq_level(oldlevel);
    return 0;
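
/* Usage sketch: queue_send blocks the calling thread until the receiver
 * dequeues the message and replies (explicitly via queue_reply, or with 0
 * when the receiver simply dequeues the next message). Not for use in
 * interrupt handlers. Names are hypothetical. */
#if 0 /* example only */
    intptr_t ok = queue_send(&ui_queue, UI_APPLY_SETTINGS,
                             (intptr_t)&new_settings);
    if (ok == 0)
        handle_apply_failure(); /* hypothetical */
#endif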
#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
    return q->send && q->send->curr_sender;
#endif
/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
    /* No IRQ lock here since IRQs cannot change this */
    if(q->send && q->send->curr_sender)
        queue_release_sender(&q->send->curr_sender, retval);
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
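
/* Usage sketch: the receiving side of a synchronous message. If the
 * receiver never calls queue_reply, the waiting sender is auto-replied
 * with 0 on the receiver's next queue_wait/queue_wait_w_tmo. Names are
 * hypothetical. */
#if 0 /* example only */
    struct event ev;

    queue_wait(&ui_queue, &ev);

    if (ev.id == UI_APPLY_SETTINGS) /* hypothetical message id */
        queue_reply(&ui_queue, apply_settings((void *)ev.data) ? 1 : 0);
#endif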
bool queue_empty(const struct event_queue *q)
    return q->read == q->write;
void queue_clear(struct event_queue *q)
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       a dequeued sent message will be handled by the owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
void queue_remove_from_head(struct event_queue *q, long id)
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
            break;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return the number of events in the queue
 */
int queue_count(const struct event_queue *q)
    return q->write - q->read;
int queue_broadcast(long id, intptr_t data)
    for(i = 0; i < num_queues; i++)
        queue_post(all_queues[i], id, data);

    return num_queues;
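
/* Usage sketch: deliver one event to every registered queue, e.g. a
 * system-wide notification. SYS_POWEROFF is a stand-in event id. */
#if 0 /* example only */
    queue_broadcast(SYS_POWEROFF, 0);
#endif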
/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    /* period too long for the 16-bit compare register? */
    if (count > 0x10000)
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
    /* We are using timer 0 */

    TSTR &= ~0x01;  /* Stop the timer */
    TSNC &= ~0x01;  /* No synchronization */
    TMDR &= ~0x01;  /* Operate normally */

    TCNT0 = 0;      /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23;    /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TIER0 = 0xf9;   /* Enable GRA match interrupt */

    TSTR |= 0x01;   /* Start timer 0 */
void IMIA0(void) __attribute__ ((interrupt_handler));

    /* Run through the list of tick tasks */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    /* period too long for the 16-bit reference register? */
    if (count > 0x10000)
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0;                           /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
    /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff;    /* Clear all events */

    ICR1 = 0x8c;    /* Interrupt on level 3.0 */
void TIMER0(void) __attribute__ ((interrupt_handler));

    /* Run through the list of tick tasks */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)

    TER0 = 0xff;    /* Clear all events */
#elif defined(CPU_PP)

    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using the main CPU core -
       wake up the COP through its control interface to provide the pulse */
    for (i = 0; i < MAX_NUM_TICK_TASKS; i++)
    /* If the COP is sleeping - give it a kick */
    /* TODO: Use a mailbox in addition to make sure it doesn't go to
     * sleep if kicked just as it's headed to rest, to make sure its
     * tick checks won't be jittery. Don't bother at all if it owns no
     * threads. */
    unsigned int cop_ctl;

    cop_ctl = COP_CTL;
    if (cop_ctl & PROC_SLEEP)
    {
        COP_CTL = cop_ctl & ~PROC_SLEEP;
    }
#endif /* NUM_CORES */

/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
#ifndef BOOTLOADER
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
#elif CONFIG_CPU == PNX0101

void timer_handler(void)
    /* Run through the list of tick tasks */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
void tick_start(unsigned int interval_in_ms)
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
#elif CONFIG_CPU == IMX31L
void tick_start(unsigned int interval_in_ms)
    EPITCR1 &= ~0x1;        /* Disable the counter */

    EPITCR1 &= ~0xE;        /* Disable interrupt, count down from 0xFFFFFFFF */
    EPITCR1 &= ~0xFFF0;     /* Clear prescaler */
    EPITCR1 |= (2700 << 2); /* Prescaler = 2700 */
    EPITCR1 &= ~(0x3 << 24);
    EPITCR1 |= (0x2 << 24); /* Set clock source to external clock (27 MHz) */
    EPITSR1 = 1;            /* Clear the interrupt request */
#ifndef BOOTLOADER
    EPITLR1 = 27000000 * interval_in_ms / 1000;
    EPITCMPR1 = 27000000 * interval_in_ms / 1000;
#else
    (void)interval_in_ms;
#endif
    //avic_enable_int(EPIT1, IRQ, EPIT_HANDLER);

    EPITCR1 |= 0x1;         /* Enable the counter */
void EPIT_HANDLER(void) __attribute__((interrupt("IRQ")));
void EPIT_HANDLER(void)
{
    /* Run through the list of tick tasks */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)

    EPITSR1 = 1; /* Clear the interrupt request */
int tick_add_task(void (*f)(void))
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Add a task if there is room */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
int tick_remove_task(void (*f)(void))
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Remove a task if it is there */
    for(i = 0; i < MAX_NUM_TICK_TASKS; i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
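
/* Usage sketch: tick tasks run in interrupt context on every timer tick,
 * so they must be short and must never block. This hypothetical task
 * counts down and posts an event when the count reaches zero. */
#if 0 /* example only */
static volatile int backlight_timer;

static void backlight_tick(void)
{
    if (backlight_timer > 0 && --backlight_timer == 0)
        queue_post(&backlight_queue, BACKLIGHT_OFF, 0); /* hypothetical */
}

static void backlight_setup(void)
{
    tick_add_task(backlight_tick);
}
#endif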
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation. */
static void timeout_tick(void)
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;
        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }
        if (curr != NULL)
        {
            /* in the list - remove it */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    set_irq_level(oldlevel);
/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
    int oldlevel;
    struct timeout *curr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;
    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }
    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    set_irq_level(oldlevel);

#endif /* INCLUDE_TIMEOUT_API */
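
/* Usage sketch: per timeout_tick above, a callback's return value decides
 * whether the timeout re-arms - nonzero reloads tmo->ticks, zero cancels.
 * The callback signature is assumed compatible with timeout_cb_type; the
 * names below are hypothetical. */
#if 0 /* example only */
static struct timeout poll_tmo;

static int poll_callback(struct timeout *tmo)
{
    poll_hardware(); /* hypothetical */
    return 1;        /* nonzero: run again after tmo->ticks */
}

/* arm it: fire every HZ/10 ticks until the callback returns 0 */
timeout_register(&poll_tmo, poll_callback, HZ/10, 0);
#endif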
/*
 * Simulator versions in uisimulator/SIMVER/
 */

/****************************************************************************
 * Simple mutex functions
 ****************************************************************************/
void mutex_init(struct mutex *m)
void mutex_lock(struct mutex *m)
    if (test_and_set(&m->locked, 1))
        /* Wait until the lock is open... */
        block_thread(&m->thread);
void mutex_unlock(struct mutex *m)
    if (m->thread == NULL)
        m->locked = 0;
    else
        wakeup_thread(&m->thread);
void spinlock_lock(struct mutex *m)
    while (test_and_set(&m->locked, 1))
        /* wait until the lock is open... */
        switch_thread(true, NULL);

void spinlock_unlock(struct mutex *m)

#endif /* ndef SIMULATOR */
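
/* Usage sketch: a mutex suits longer critical sections, since a contended
 * lock blocks the calling thread; a spinlock busy-waits by yielding and
 * suits very short holds. The shared-state names are hypothetical. */
#if 0 /* example only */
static struct mutex buffer_mutex; /* mutex_init(&buffer_mutex) once at startup */

static void buffer_append(const char *data, int len)
{
    mutex_lock(&buffer_mutex);
    /* ... modify the shared buffer ... */
    mutex_unlock(&buffer_mutex);
}
#endif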