/*
 * linux/arch/blackfin/kernel/ipipe.c
 *
 * Copyright (C) 2005-2007 Philippe Gerum.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 * USA; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Architecture-dependent I-pipe support for the Blackfin.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <asm/unistd.h>
#include <asm/system.h>
#include <asm/atomic.h>

static int create_irq_threads;

DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);

static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);

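/*
 * The two per-CPU variables above back the threaded IRQ support
 * below: pending_irqthread_mask keeps one bit per thread priority
 * level with work outstanding, and pending_irq_count counts the IRQs
 * still awaiting service at each level.
 */
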
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);

static void __ipipe_no_irqtail(void);

unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
EXPORT_SYMBOL(__ipipe_irq_tail_hook);

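/*
 * The tail hook defaults to a no-op; a higher domain (e.g. Xenomai)
 * may install its own routine here to perform deferred work, such as
 * rescheduling, once interrupt or syscall processing is done (see
 * __ipipe_syscall_root() below).
 */
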
unsigned long __ipipe_core_clock;
EXPORT_SYMBOL(__ipipe_core_clock);

unsigned long __ipipe_freq_scale;
EXPORT_SYMBOL(__ipipe_freq_scale);

atomic_t __ipipe_irq_lvdepth[IVG15 + 1];

unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
EXPORT_SYMBOL(__ipipe_irq_lvmask);

static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
{
        desc->ipipe_ack(irq, desc);
}

/*
 * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
 * interrupts are off, and secondary CPUs are still lost in space.
 */
void __ipipe_enable_pipeline(void)
{
        unsigned irq;

        __ipipe_core_clock = get_cclk(); /* Fetch this once. */
        __ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;

        for (irq = 0; irq < NR_IRQS; ++irq)
                ipipe_virtualize_irq(ipipe_root_domain,
                                     irq,
                                     (ipipe_irq_handler_t)&asm_do_IRQ,
                                     NULL,
                                     &__ipipe_ack_irq,
                                     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}

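/*
 * Every hw interrupt is thus initially virtualized to the root
 * (Linux) domain with asm_do_IRQ() as its handler; IPIPE_HANDLE_MASK
 * marks the domain as servicing the IRQ, while IPIPE_PASS_MASK lets
 * the IRQ propagate further down the pipeline.
 */
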
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct ipipe_domain *this_domain, *next_domain;
        struct list_head *head, *pos;
        int m_ack, s = -1;

        /*
         * Software-triggered IRQs do not need any ack. The contents
         * of the register frame should only be used when processing
         * the timer interrupt, but not for handling any other
         * interrupt.
         */
        m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);

        this_domain = ipipe_current_domain;

        if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
                head = &this_domain->p_link;
        else {
                head = __ipipe_pipeline.next;
                next_domain = list_entry(head, struct ipipe_domain, p_link);
                if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
                                next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
                        if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
                                s = __test_and_set_bit(IPIPE_STALL_FLAG,
                                                       &ipipe_root_cpudom_var(status));
                        __ipipe_dispatch_wired(next_domain, irq);
                        goto finalize;
                }
        }

        /* Ack the interrupt. */

        pos = head;

        while (pos != &__ipipe_pipeline) {
                next_domain = list_entry(pos, struct ipipe_domain, p_link);
                /*
                 * For each domain handling the incoming IRQ, mark it
                 * as pending in its log.
                 */
                if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
                        /*
                         * Domains that handle this IRQ are polled for
                         * acknowledging it by decreasing priority
                         * order. The interrupt must be made pending
                         * _first_ in the domain's status flags before
                         * the PIC is unlocked.
                         */
                        __ipipe_set_irq_pending(next_domain, irq);

                        if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
                                next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
                                m_ack = 1;
                        }
                }

                /*
                 * If the domain does not want the IRQ to be passed
                 * down the interrupt pipe, exit the loop now.
                 */
                if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
                        break;

                pos = next_domain->p_link.next;
        }

        /*
         * Now walk the pipeline, yielding control to the highest
         * priority domain that has pending interrupt(s) or
         * immediately to the current domain if the interrupt has been
         * marked as 'sticky'. This search does not go beyond the
         * current domain in the pipeline. We also enforce the
         * additional root stage lock (blackfin-specific). */

        if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
                s = __test_and_set_bit(IPIPE_STALL_FLAG,
                                       &ipipe_root_cpudom_var(status));
finalize:

        __ipipe_walk_pipeline(head);

        if (!s)
                __clear_bit(IPIPE_STALL_FLAG,
                            &ipipe_root_cpudom_var(status));
}

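/*
 * For instance, with a real-time domain registered ahead of the root
 * domain, an incoming IRQ that both domains handle is logged as
 * pending in each domain's log and acknowledged once;
 * __ipipe_walk_pipeline() then plays the logs by decreasing domain
 * priority, so the real-time handler always runs before Linux's.
 */
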
int __ipipe_check_root(void)
{
        return ipipe_root_domain_p;
}

void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int prio = desc->ic_prio;

        if (ipd != &ipipe_root &&
            atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
                __set_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_enable_irqdesc);

void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int prio = desc->ic_prio;

        if (ipd != &ipipe_root &&
            atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
                __clear_bit(prio, &__ipipe_irq_lvmask);
}
EXPORT_SYMBOL(__ipipe_disable_irqdesc);

void __ipipe_stall_root_raw(void)
{
        /*
         * This code is called by the ins{bwl} routines (see
         * arch/blackfin/lib/ins.S), which are heavily used by the
         * network stack. It masks all interrupts but those handled by
         * non-root domains, so that we keep decent network transfer
         * rates for Linux without inducing pathological jitter for
         * the real-time domain.
         */
        __asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));

        __set_bit(IPIPE_STALL_FLAG,
                  &ipipe_root_cpudom_var(status));
}

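/*
 * Note the interplay with __ipipe_enable_irqdesc() above: each core
 * priority level in use by a non-root domain has its bit set in
 * __ipipe_irq_lvmask, so the "sti" here leaves exactly those levels
 * enabled while the root stage is stalled.
 */
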
void __ipipe_unstall_root_raw(void)
{
        __clear_bit(IPIPE_STALL_FLAG,
                    &ipipe_root_cpudom_var(status));

        __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
}

int __ipipe_syscall_root(struct pt_regs *regs)
{
        unsigned long flags;

        /* We need to run the IRQ tail hook whenever we don't
         * propagate a syscall to higher domains, because we know that
         * important operations might be pending there (e.g. Xenomai
         * deferred rescheduling). */

        if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
                void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
                hook();
                return 0;
        }

        /*
         * This routine either returns:
         * 0 -- if the syscall is to be passed to Linux;
         * 1 -- if the syscall should not be passed to Linux, and no
         * tail work should be performed;
         * -1 -- if the syscall should not be passed to Linux but the
         * tail work has to be performed (for handling signals etc).
         */

        if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
            __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
                if (ipipe_root_domain_p && !in_atomic()) {
                        /*
                         * Sync pending VIRQs before _TIF_NEED_RESCHED
                         * is tested.
                         */
                        local_irq_save_hw(flags);
                        if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
                                __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
                        local_irq_restore_hw(flags);
                        return -1;
                }
                return 1;
        }

        return 0;
}

unsigned long ipipe_critical_enter(void (*syncfn) (void))
{
        unsigned long flags;

        local_irq_save_hw(flags);

        return flags;
}

void ipipe_critical_exit(unsigned long flags)
{
        local_irq_restore_hw(flags);
}

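/*
 * On this uniprocessor port, entering a critical section reduces to
 * masking hw interrupts; the syncfn argument is accepted for API
 * compatibility but unused.
 */
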
static void __ipipe_no_irqtail(void)
{
}

int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
        info->ncpus = num_online_cpus();
        info->cpufreq = ipipe_cpu_freq();
        info->archdep.tmirq = IPIPE_TIMER_IRQ;
        info->archdep.tmfreq = info->cpufreq;

        return 0;
}

/*
 * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
 * just as if it had been actually received from a hw source. Also
 * works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned irq)
{
        unsigned long flags;

        if (irq >= IPIPE_NR_IRQS ||
            (ipipe_virtual_irq_p(irq)
             && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
                return -EINVAL;

        local_irq_save_hw(flags);

        __ipipe_handle_irq(irq, NULL);

        local_irq_restore_hw(flags);

        return 1;
}

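/*
 * A minimal usage sketch, assuming a virtual IRQ set up through the
 * generic I-pipe API (rt_handler being a hypothetical handler):
 *
 *      unsigned virq = ipipe_alloc_virq();
 *      ipipe_virtualize_irq(ipipe_root_domain, virq,
 *                           rt_handler, NULL, NULL,
 *                           IPIPE_HANDLE_MASK);
 *      ipipe_trigger_irq(virq);
 */
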
/* Move Linux IRQ to threads. */

static int do_irqd(void *__desc)
{
        struct irq_desc *desc = __desc;
        unsigned irq = desc - irq_desc;
        int thrprio = desc->thr_prio;
        int thrmask = 1 << thrprio;
        int cpu = smp_processor_id();
        cpumask_t cpumask;

        sigfillset(&current->blocked);
        current->flags |= PF_NOFREEZE;
        cpumask = cpumask_of_cpu(cpu);
        set_cpus_allowed(current, cpumask);
        ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);

        while (!kthread_should_stop()) {
                local_irq_disable();
                if (!(desc->status & IRQ_SCHEDULED)) {
                        set_current_state(TASK_INTERRUPTIBLE);
resched:
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                }
                __set_current_state(TASK_RUNNING);
                /*
                 * If higher priority interrupt servers are ready to
                 * run, reschedule immediately. We need this for the
                 * GPIO demux IRQ handler to unmask the interrupt line
                 * _last_, after all GPIO IRQs have run.
                 */
                if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
                        goto resched;
                if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
                        per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
                desc->status &= ~IRQ_SCHEDULED;
                desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
                local_irq_enable();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

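/*
 * In the rescheduling test above, thrmask | (thrmask - 1) covers all
 * priority bits at or below this thread's level, so the negated mask
 * intersects the pending mask exactly when a strictly higher priority
 * IRQ thread has work outstanding.
 */
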
static void kick_irqd(unsigned irq, void *cookie)
{
        struct irq_desc *desc = irq_desc + irq;
        int thrprio = desc->thr_prio;
        int thrmask = 1 << thrprio;
        int cpu = smp_processor_id();

        if (!(desc->status & IRQ_SCHEDULED)) {
                desc->status |= IRQ_SCHEDULED;
                per_cpu(pending_irqthread_mask, cpu) |= thrmask;
                ++per_cpu(pending_irq_count[thrprio], cpu);
                wake_up_process(desc->thread);
        }
}

int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
{
        if (desc->thread || !create_irq_threads)
                return 0;

        desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
        if (desc->thread == NULL) {
                printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
                return -ENOMEM;
        }

        wake_up_process(desc->thread);

        desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
        ipipe_root_domain->irqs[irq].handler = &kick_irqd;

        return 0;
}

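/*
 * Interposition scheme: the root domain's original handler is saved
 * in desc->thr_handler and replaced with kick_irqd(), so a hw
 * interrupt now merely marks the IRQ as scheduled and wakes the
 * per-IRQ thread, which invokes the original handler from do_irqd().
 */
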
void __init ipipe_init_irq_threads(void)
{
        unsigned irq;
        struct irq_desc *desc;

        create_irq_threads = 1;

        for (irq = 0; irq < NR_IRQS; irq++) {
                desc = irq_desc + irq;
                if (desc->action != NULL ||
                    (desc->status & IRQ_NOREQUEST) != 0)
                        ipipe_start_irq_thread(irq, desc);
        }
}

EXPORT_SYMBOL(show_stack);

#ifdef CONFIG_IPIPE_TRACE_MCOUNT
void notrace _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif /* CONFIG_IPIPE_TRACE_MCOUNT */