arch/x86/kernel/irq_32.c

/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;
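
        /*
         * %esp masked with THREAD_SIZE - 1 yields the offset into the
         * THREAD_SIZE-aligned kernel stack; the stack grows down toward
         * the thread_info at the base, so a small offset means little
         * free space remains.
         */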
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};
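
/*
 * The union gives each IRQ stack the layout of a regular task stack:
 * a thread_info at the THREAD_SIZE-aligned base with the stack growing
 * down from the top, so current_thread_info()'s %esp masking keeps
 * working while we run on it.
 */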

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
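
/*
 * Call func on the given stack, then return to the original one:
 * xchgl swaps %esp with the new stack pointer held in %ebx, the
 * indirect call runs func there, and because %ebx is callee-saved
 * the final movl restores the original %esp.
 */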
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
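        /* the stack grows down, so isp starts just past the end of irq_ctx */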
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);
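
        /*
         * Same stack-switch trick as call_on_stack(), but the handler
         * takes arguments: under the kernel's regparm(3) convention on
         * 32-bit, irq travels in %eax and desc in %edx, which is what
         * the "0"/"1" constraints below arrange.
         */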
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;
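
        /*
         * The thread_info on each IRQ stack is mostly a dummy. The
         * hardirq one presets preempt_count to HARDIRQ_OFFSET so that
         * in_irq() stays true while running on this stack; an
         * addr_limit of 0 presumably makes stray user-space accesses
         * from the bare IRQ context fail outright.
         */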
        irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}

#else
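/*
 * Without CONFIG_4KSTACKS interrupts simply run on the interrupted
 * task's 8KB kernel stack, so there is no separate IRQ stack to
 * switch to.
 */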
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;
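
        /*
         * Try to run the handler on the per-cpu hardirq stack; fall
         * back to the current stack if we are already on the IRQ stack
         * (or when CONFIG_4KSTACKS is off and the stub returns 0).
         */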
        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}

#ifdef CONFIG_HOTPLUG_CPU

/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                const struct cpumask *affinity;

                if (!desc)
                        continue;
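
                /* IRQ 2 is the 8259 cascade; it cannot be re-routed */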
                if (irq == 2)
                        continue;

                affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif