/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}
static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}
#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
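/*
 * Usage sketch (illustrative, not part of the original file): in the
 * sparse model, irq_to_desc() is the lookup side of the radix tree above
 * and returns NULL for irq numbers that were never allocated, so callers
 * are expected to check the result:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *
 *	if (!desc)
 *		return -EINVAL;
 */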
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	/* -EEXIST means the descriptor already exists; hand it back. */
	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}
#else /* !CONFIG_SPARSE_IRQ */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		/* TODO : do this allocation on-demand ... */
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
	struct irq_desc *desc;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		desc = irq_to_desc(start + i);
		if (desc && !desc->kstat_irqs) {
			unsigned int __percpu *stats = alloc_percpu(unsigned int);

			if (!stats)
				return -ENOMEM;
			/* Another CPU may have installed stats concurrently. */
			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
				free_percpu(stats);
		}
	}
#endif
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */
/* Dynamic interrupt handling */
/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
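/*
 * Usage sketch (illustrative, not part of the original file): releasing a
 * block obtained from irq_alloc_descs() below; "base" and "nr_vecs" are
 * hypothetical values from that earlier allocation.
 *
 *	irq_free_descs(base, nr_vecs);
 */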
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
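/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that wants a contiguous block of irq numbers anywhere in the space
 * passes irq < 0 so the allocator picks the range itself; "nr_vecs" and
 * "node" are hypothetical.
 *
 *	int base = irq_alloc_descs(-1, 0, nr_vecs, node);
 *
 *	if (base < 0)
 *		return base;	// no free range: negative error code
 *	// irqs base .. base + nr_vecs - 1 now belong to this caller
 */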
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
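/*
 * Usage sketch (illustrative, not part of the original file): code that
 * hands out fixed, pre-wired irq numbers can reserve them so the dynamic
 * allocator never reuses them; FIRST_LEGACY_IRQ and NR_LEGACY_IRQS are
 * hypothetical constants.
 *
 *	if (irq_reserve_irqs(FIRST_LEGACY_IRQ, NR_LEGACY_IRQS))
 *		printk(KERN_WARNING "legacy irq range already in use\n");
 */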
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns the next allocated irq number at or after @offset, or nr_irqs
 * if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
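/*
 * Usage sketch (illustrative, not part of the original file): walking
 * every allocated irq number; process_irq() is a hypothetical per-irq
 * hook.
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		process_irq(irq);
 */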
/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
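/*
 * Usage sketch (illustrative, not part of the original file): reading the
 * counters the way /proc/interrupts-style reporting would, per CPU and in
 * total; "irq" is a hypothetical allocated irq number.
 *
 *	unsigned int on_cpu0 = kstat_irqs_cpu(irq, 0);
 *	unsigned int total = kstat_irqs(irq);	// sum over possible CPUs
 */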