/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
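
/*
 * Example (illustrative sketch, not part of this header): requesting a
 * falling-edge interrupt for a GPIO-style device with request_irq(),
 * declared further down in this file. The irq number, handler and
 * device names are invented for the example.
 *
 *	static irqreturn_t button_isr(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(button_irq, button_isr,
 *			  IRQF_TRIGGER_FALLING, "button", button_dev);
 */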

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000

typedef irqreturn_t (*irq_handler_t)(int, void *);

struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id);
extern int __must_check request_irq(unsigned int, irq_handler_t handler,
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
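
/*
 * Example (illustrative sketch, not part of this header): sharing an
 * interrupt line with IRQF_SHARED. A shared handler must check whether
 * its own device raised the interrupt and return IRQ_NONE if not, and
 * dev_id must be unique and non-NULL so free_irq() can tell the
 * actions on the line apart. The foo_* names are invented.
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct foo_device *dev = dev_id;
 *
 *		if (!foo_irq_pending(dev))
 *			return IRQ_NONE;	(another device's irq)
 *		foo_ack_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, foo_isr, IRQF_SHARED, "foo", dev);
 *	...
 *	free_irq(dev->irq, dev);
 */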

struct device;

extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
			    irq_handler_t handler, unsigned long irqflags,
			    const char *devname, void *dev_id);
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
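
/*
 * Example (illustrative sketch, not part of this header): the
 * device-managed variant. An irq requested with devm_request_irq() in
 * a driver's probe routine is released automatically when the device
 * is detached, so the error and remove paths need no explicit
 * free_irq(). The foo_* names are invented.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		err = devm_request_irq(&pdev->dev, irq, foo_isr, 0,
 *				       "foo", foo);
 *		if (err)
 *			return err;	(no matching free needed)
 *		...
 *	}
 */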

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif
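
/*
 * Example (illustrative sketch, not part of this header): a handler
 * for an old, very slow device that has to run with irqs enabled.
 * slow_device_io() is an invented helper.
 *
 *	static irqreturn_t slow_isr(int irq, void *dev_id)
 *	{
 *		local_irq_enable_in_hardirq();
 *		slow_device_io(dev_id);	(may spin for a long time)
 *		return IRQ_HANDLED;
 *	}
 */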

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
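
/*
 * Example (illustrative sketch, not part of this header): pinning an
 * irq to CPU 0, guarded by irq_can_set_affinity() since not every
 * interrupt controller supports affinity changes.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of_cpu(0));
 */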

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled and is the only
 * irq-context user of a lock, so that it's safe to take the
 * lock in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
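
/*
 * Example (illustrative sketch, not part of this header): taking a
 * lock whose only irq-context user is the (now disabled) handler of
 * dev->irq, so plain spin_lock() suffices. The dev names are invented.
 *
 *	disable_irq_lockdep(dev->irq);
 *	spin_lock(&dev->lock);		(no _irqsave needed: the only
 *					 irq-context user is disabled)
 *	...
 *	spin_unlock(&dev->lock);
 *	enable_irq_lockdep(dev->irq);
 */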

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
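
/*
 * Example (illustrative sketch, not part of this header): arming an
 * irq as a system wakeup source across suspend/resume. The foo_*
 * names are invented.
 *
 *	static int foo_suspend(struct platform_device *pdev, pm_message_t state)
 *	{
 *		if (device_may_wakeup(&pdev->dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct platform_device *pdev)
 *	{
 *		if (device_may_wakeup(&pdev->dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */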

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}

#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE avoid allocating new softirqs unless you really need very
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
#ifdef CONFIG_HIGH_RES_TIMERS
	HRTIMER_SOFTIRQ,
#endif
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
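
/*
 * Example (illustrative sketch, not part of this header): how a
 * softirq vector is wired up and raised. MY_SOFTIRQ stands for one of
 * the enum slots above; per the note above, new slots should not be
 * added and tasklets used instead.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		(drain per-cpu work here; runs in softirq context with
 *		 irqs enabled and must not sleep)
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	(once, at init)
 *
 *	raise_softirq(MY_SOFTIRQ);	(marks it pending on this cpu; use
 *					 raise_softirq_irqoff() if irqs are
 *					 already disabled)
 */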

/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some inter-tasklet
     synchronization, he does it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
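
/*
 * Example (illustrative sketch, not part of this header): deferring
 * work from an interrupt handler to a tasklet. The foo_* names are
 * invented.
 *
 *	static void foo_do_work(unsigned long data)
 *	{
 *		struct foo_device *dev = (struct foo_device *)data;
 *		(runs later in softirq context)
 *	}
 *
 *	static DECLARE_TASKLET(foo_tasklet, foo_do_work,
 *			       (unsigned long)&foo_dev);
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		foo_ack_irq(dev_id);
 *		tasklet_schedule(&foo_tasklet);	(cheap; extra calls before
 *						 the tasklet runs are folded
 *						 into one execution)
 *		return IRQ_HANDLED;
 *	}
 */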

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
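
/*
 * Example (illustrative sketch, not part of this header): a per-device
 * tasklet set up at runtime with tasklet_init() instead of
 * DECLARE_TASKLET, and torn down safely on removal. The dev names are
 * invented.
 *
 *	tasklet_init(&dev->tasklet, foo_do_work, (unsigned long)dev);
 *	...
 *	tasklet_disable(&dev->tasklet);	(waits for a running instance
 *					 to finish)
 *	...
 *	tasklet_enable(&dev->tasklet);
 *	...
 *	tasklet_kill(&dev->tasklet);	(on teardown: ensures it is neither
 *					 scheduled nor running afterwards)
 */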

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
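
/*
 * Example (illustrative sketch, not part of this header): the probing
 * recipe above in code, for a hypothetical legacy device. The foo_*
 * helpers are invented; msleep() stands in for "non-intrusive polling
 * or a delay".
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_device_irq(dev);		(step 1)
 *	irqs = probe_irq_on();			(step 3)
 *	foo_trigger_interrupt(dev);		(step 4)
 *	msleep(20);				(step 5)
 *	irq = probe_irq_off(irqs);		(step 6)
 *	foo_ack_device_irq(dev);		(step 7)
 *	if (irq <= 0)
 *		(none or multiple irqs seen: probe failed, retry or give up)
 */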

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

int show_interrupts(struct seq_file *p, void *v);

#endif