/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <linux/bottom_half.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - interrupt is per cpu
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400

/*
 * Migration helpers.  Scheduled for removal in 1/2007.
 * Do not use for new code!
 */
#define SA_INTERRUPT		IRQF_DISABLED
#define SA_SAMPLE_RANDOM	IRQF_SAMPLE_RANDOM
#define SA_SHIRQ		IRQF_SHARED
#define SA_PROBEIRQ		IRQF_PROBE_SHARED
#define SA_PERCPU		IRQF_PERCPU

#define SA_TRIGGER_LOW		IRQF_TRIGGER_LOW
#define SA_TRIGGER_HIGH		IRQF_TRIGGER_HIGH
#define SA_TRIGGER_FALLING	IRQF_TRIGGER_FALLING
#define SA_TRIGGER_RISING	IRQF_TRIGGER_RISING
#define SA_TRIGGER_MASK		IRQF_TRIGGER_MASK

typedef irqreturn_t (*irq_handler_t)(int, void *);

struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id);
extern int request_irq(unsigned int, irq_handler_t handler,
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
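
/*
 * Minimal usage sketch (illustrative only, not part of the original
 * header): a driver for a hypothetical "foo" device requests a shared,
 * falling-edge interrupt and releases it on teardown.  struct
 * foo_device, foo_irq, foo_dev and the foo_* helpers are assumptions
 * for illustration, not real kernel APIs.
 */
#if 0	/* example only, not compiled */
static unsigned int foo_irq;		/* assumed: discovered elsewhere */
static struct foo_device *foo_dev;	/* assumed: allocated elsewhere  */

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_irq_pending(foo))	/* hypothetical status check */
		return IRQ_NONE;	/* not ours: the line is shared */

	foo_ack_irq(foo);		/* hypothetical acknowledge */
	return IRQ_HANDLED;
}

static int foo_probe(void)
{
	return request_irq(foo_irq, foo_interrupt,
			   IRQF_SHARED | IRQF_TRIGGER_FALLING,
			   "foo", foo_dev);
}

static void foo_remove(void)
{
	free_irq(foo_irq, foo_dev);	/* dev_id must match request_irq() */
}
#endif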

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context.  Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left.  So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and that it is the only irq-context user of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
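
/*
 * Illustrative sketch (not part of the original header): a platform
 * driver arming its interrupt as a system wakeup source across
 * suspend/resume.  foo_irq and the foo_* hooks are assumptions;
 * linux/platform_device.h and linux/pm.h would be needed.
 */
#if 0	/* example only, not compiled */
static int foo_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(foo_irq);	/* let the irq wake the system */
	return 0;
}

static int foo_resume(struct platform_device *pdev)
{
	if (device_may_wakeup(&pdev->dev))
		disable_irq_wake(foo_irq);	/* balance the enable above */
	return 0;
}
#endif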

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
# ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
# endif

#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x)	(local_softirq_pending() = (x))
#define or_softirq_pending(x)	(local_softirq_pending() |= (x))
#endif

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
static inline void __deprecated cli(void)
{
	local_irq_disable();
}
static inline void __deprecated sti(void)
{
	local_irq_enable();
}
static inline void __deprecated save_flags(unsigned long *x)
{
	local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
	local_irq_restore(x);
}
static inline void __deprecated save_and_cli(unsigned long *x)
{
	local_irq_save(*x);
}
#define save_and_cli(x)	save_and_cli(&x)
#endif /* CONFIG_SMP */

/* PLEASE avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling.  For almost all purposes
   tasklets are more than enough.  E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
	void	*data;
};

asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));
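
/*
 * Illustrative sketch (not part of the original header): the pattern a
 * subsystem would use to wire up a softirq.  FOO_SOFTIRQ is a
 * hypothetical slot that would have to be added to the enum above, and
 * the foo_* names are assumptions.  Per the comment above, new
 * softirqs should almost never be added; tasklets are usually enough.
 */
#if 0	/* example only, not compiled */
static void foo_softirq_action(struct softirq_action *a)
{
	/* drain a per-cpu work queue, entirely in softirq context */
}

static int __init foo_init(void)
{
	open_softirq(FOO_SOFTIRQ, foo_softirq_action, NULL);
	return 0;
}

static void foo_kick(void)
{
	/* from irq context, with hardirqs already disabled: */
	raise_softirq_irqoff(FOO_SOFTIRQ);
}
#endif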

/* Tasklets --- multithreaded analogue of BHs.

   Main feature differentiating them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differentiating them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets.  If a client needs some intertask
     synchronization, it must be done with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
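
/*
 * Illustrative sketch (not part of the original header): a statically
 * declared tasklet scheduled from a hard-irq handler to defer the bulk
 * of the work.  foo_do_work and foo_interrupt are assumed names.
 */
#if 0	/* example only, not compiled */
static void foo_do_work(unsigned long data)
{
	/* runs later in softirq context, on one CPU at a time */
}

static DECLARE_TASKLET(foo_tasklet, foo_do_work, 0);

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	/* ack the hardware quickly, then defer the heavy lifting */
	tasklet_schedule(&foo_tasklet);
	return IRQ_HANDLED;
}
#endif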

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
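
/*
 * Illustrative sketch (not part of the original header): a per-device
 * tasklet set up dynamically with tasklet_init() and torn down with
 * tasklet_kill().  struct foo_device and the foo_* names are assumed.
 */
#if 0	/* example only, not compiled */
struct foo_device {
	struct tasklet_struct rx_tasklet;
	/* ... */
};

static void foo_rx_action(unsigned long data)
{
	struct foo_device *foo = (struct foo_device *)data;
	/* drain the receive ring ... */
}

static void foo_setup(struct foo_device *foo)
{
	tasklet_init(&foo->rx_tasklet, foo_rx_action, (unsigned long)foo);
}

static void foo_teardown(struct foo_device *foo)
{
	/* waits for any pending run to complete before returning */
	tasklet_kill(&foo->rx_tasklet);
}
#endif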

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
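
/*
 * Illustrative sketch (not part of the original header) of the probing
 * sequence described above, for a hypothetical legacy device; the
 * foo_* helpers are assumptions, and udelay() needs linux/delay.h.
 */
#if 0	/* example only, not compiled */
static int foo_find_irq(void)
{
	unsigned long mask;
	int irq;

	foo_mask_device_irq();		/* step 1: quiesce the device   */
	mask = probe_irq_on();		/* step 3: claim idle IRQs      */
	foo_trigger_interrupt();	/* step 4: make the device fire */
	udelay(100);			/* step 5: give it time         */
	irq = probe_irq_off(mask);	/* step 6: 0=none, <0=multiple  */
	foo_ack_device_irq();		/* step 7: clear pending irq    */

	return irq;
}
#endif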

#endif /* _LINUX_INTERRUPT_H */