/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}
static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
	desc_smp_init(desc, node);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
					gfp, node);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	kfree(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	kfree(desc->kstat_irqs);
	kfree(desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		desc[i].kstat_irqs = kstat_irqs_all[i];
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */
/* Dynamic interrupt handling */
/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
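
/*
 * Usage sketch (illustrative only): releasing a block of interrupt
 * numbers that a hypothetical driver obtained earlier from
 * irq_alloc_descs() below.  The function name is made up; "base" and
 * "count" are whatever the driver remembered from the allocation.
 */
#if 0
static void example_release_irq_block(unsigned int base, unsigned int count)
{
	/* Frees each descriptor and clears the range in allocated_irqs. */
	irq_free_descs(base, count);
}
#endif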
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate.
 * @node: Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
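
/*
 * Usage sketch (illustrative only): how hypothetical board setup code
 * might pin down a specific interrupt number and a small linear block
 * with irq_alloc_descs().  The function name and the numbers 16 and 4
 * are assumptions made for the example.
 */
#if 0
static int __init example_alloc_irq_block(int node)
{
	/* Insist on getting irq 16 as the first of 4 consecutive irqs. */
	int base = irq_alloc_descs(16, 16, 4, node);

	if (base < 0)
		return base;	/* -EEXIST or -ENOMEM from the allocator */

	/* ... set up chips and handlers for base..base+3 here ... */
	return base;
}
#endif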
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from: mark from irq number
 * @cnt: number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
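
/*
 * Usage sketch (illustrative only): an architecture might mark a range
 * of legacy interrupt numbers as in use so that the dynamic allocator
 * never hands them out.  The function name and the range 0..15 are
 * assumptions made for the example.
 */
#if 0
static int __init example_reserve_legacy_irqs(void)
{
	/* Keep irqs 0-15 out of irq_alloc_descs()'s reach. */
	return irq_reserve_irqs(0, 16);
}
#endif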
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset: where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
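
/*
 * Usage sketch (illustrative only): walking every allocated irq number
 * with irq_get_next_irq(); the search returns nr_irqs once no further
 * bits are set.  The function name is made up for the example.
 */
#if 0
static unsigned int example_count_allocated_irqs(void)
{
	unsigned int irq, seen = 0;

	for (irq = irq_get_next_irq(0); irq < nr_irqs;
	     irq = irq_get_next_irq(irq + 1))
		seen++;

	return seen;
}
#endif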
/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq: irq number to initialize
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? desc->kstat_irqs[cpu] : 0;
}
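
/*
 * Usage sketch (illustrative only): summing the per-CPU counts returned
 * by kstat_irqs_cpu() to get the total number of times an irq has fired,
 * in the style of /proc/interrupts reporting.  The function name is made
 * up for the example.
 */
#if 0
static unsigned int example_total_irq_count(unsigned int irq)
{
	unsigned int cpu, sum = 0;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);
	return sum;
}
#endif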