/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}
static void desc_smp_init(struct irq_desc *desc, int node)
{
        desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
}
#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
        desc->status = IRQ_DEFAULT_INIT_FLAGS;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->name = NULL;
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
        desc_smp_init(desc, node);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

DEFINE_RAW_SPINLOCK(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
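
/*
 * allocated_irqs tracks which irq numbers are in use. Updates to the
 * bitmap (and, under CONFIG_SPARSE_IRQ, to the radix tree below) are
 * serialized by sparse_irq_lock.
 */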
#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
        .status         = IRQ_DEFAULT_INIT_FLAGS,
        .handle_irq     = handle_bad_irq,
        .depth          = 1,
        .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
        void *ptr;

        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
                           GFP_ATOMIC, node);

        /*
         * don't overwrite if we can not get a new one;
         * init_copy_kstat_irqs() could still use the old one
         */
        if (ptr) {
                printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
                desc->kstat_irqs = ptr;
        }
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        raw_spin_lock_init(&desc->lock);
        desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
        desc->irq_data.node = node;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, node, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        if (!alloc_desc_masks(desc, node, false)) {
                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
                BUG_ON(1);
        }
        init_desc_masks(desc);
        arch_init_chip_data(desc, node);
}
static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        void **ptr;

        ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
        if (ptr)
                radix_tree_replace_slot(ptr, desc);
}

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}
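
/*
 * Usage note (illustrative, not from the original file): under
 * CONFIG_SPARSE_IRQ a descriptor may simply not exist yet, so callers
 * of irq_to_desc() must tolerate a NULL return:
 *
 *      struct irq_desc *desc = irq_to_desc(irq);
 *
 *      if (!desc)
 *              return;
 */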
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
static struct irq_desc *alloc_desc(int irq, int node)
{
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;

        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
                                        gfp, node);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);

        desc_set_defaults(irq, desc, node);
        return desc;

err_kstat:
        kfree(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}
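
/*
 * alloc_desc() unwinds in reverse order of allocation on failure:
 * err_kstat frees the kstat_irqs array before err_desc frees the
 * descriptor itself, so a partial failure leaks nothing.
 */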
static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        unregister_irq_proc(irq, desc);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        delete_irq_desc(irq);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        free_masks(desc);
        kfree(desc->kstat_irqs);
        kfree(desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        struct irq_desc *desc;
        unsigned long flags;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node);
                if (!desc)
                        goto err;
                raw_spin_lock_irqsave(&sparse_irq_lock, flags);
                irq_insert_desc(start + i, desc);
                raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        bitmap_clear(allocated_irqs, start, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return -ENOMEM;
}
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .status         = IRQ_DEFAULT_INIT_FLAGS,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
        }
};

static unsigned int *kstat_irqs_legacy;
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int node;
        int i;

        init_irq_default_affinity();

        /* initialize nr_irqs based on nr_cpu_ids */
        arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
        node = first_online_node;

        /* allocate based on nr_cpu_ids */
        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
                                         sizeof(int), GFP_NOWAIT, node);

        irq_desc_init.irq_data.chip = &no_irq_chip;

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
                desc[i].irq_data.node = node;
#endif
                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                alloc_desc_masks(&desc[i], node, true);
                init_desc_masks(&desc[i]);
                irq_insert_desc(i, &desc[i]);
        }

        return arch_early_irq_init();
}
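
/*
 * early_irq_init() runs once from start_kernel(), before any
 * interrupt can be requested, so the statically allocated legacy
 * descriptors above are usable before the dynamic allocators are.
 */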
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        unsigned long flags;

        if (irq >= nr_irqs) {
                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
                        irq, nr_irqs);
                return NULL;
        }

        desc = irq_to_desc(irq);
        if (desc)
                return desc;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_to_desc(irq);
        if (desc)
                goto out_unlock;

        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

        printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, node);

        irq_insert_desc(irq, desc);

out_unlock:
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DEFAULT_INIT_FLAGS,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                alloc_desc_masks(&desc[i], 0, true);
                init_desc_masks(&desc[i]);
                desc[i].kstat_irqs = kstat_irqs_all[i];
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        }
        return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
        return irq_to_desc(irq);
}
#ifdef CONFIG_SMP
static inline int desc_node(struct irq_desc *desc)
{
        return desc->irq_data.node;
}
#else
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif
static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, desc_node(desc));
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        unsigned long flags;
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        bitmap_clear(allocated_irqs, from, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
}
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
        unsigned long flags;
        int start, ret;

        if (!cnt)
                return -EINVAL;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);

        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        ret = -ENOMEM;
        if (start >= nr_irqs)
                goto err;

        bitmap_set(allocated_irqs, start, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return alloc_descs(start, cnt, node);

err:
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return ret;
}
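
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * needing a block of eight interrupts could do
 *
 *      int base = irq_alloc_descs(-1, 0, 8, numa_node_id());
 *
 *      if (base < 0)
 *              return base;            (-EEXIST or -ENOMEM)
 *      ...
 *      irq_free_descs(base, 8);
 *
 * Passing irq >= 0 instead demands that exact number as the start of
 * the range; any other free area makes the call fail with -EEXIST.
 */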
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:       mark from irq number
 * @cnt:        number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
        unsigned long flags;
        unsigned int start;
        int ret = 0;

        if (!cnt || (from + cnt) > nr_irqs)
                return -EINVAL;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        if (start == from)
                bitmap_set(allocated_irqs, start, cnt);
        else
                ret = -EEXIST;
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return ret;
}
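
/*
 * Example (illustrative): an architecture that wants to keep the 16
 * legacy ISA vectors away from dynamic allocation could call
 *
 *      irq_reserve_irqs(0, 16);
 *
 * after which irq_alloc_descs() skips that range. The call fails with
 * -EEXIST if any irq in the range is already allocated.
 */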
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}
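
/*
 * Iteration example (illustrative): walking all allocated irqs:
 *
 *      unsigned int irq;
 *
 *      for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *           irq = irq_get_next_irq(irq + 1))
 *              ...
 */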
/* Statistics access */
void clear_kstat_irqs(struct irq_desc *desc)
{
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? desc->kstat_irqs[cpu] : 0;
}
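
/*
 * Example (illustrative): summing the per-CPU counters gives the
 * total interrupt count for one irq line:
 *
 *      unsigned int sum = 0;
 *      int cpu;
 *
 *      for_each_possible_cpu(cpu)
 *              sum += kstat_irqs_cpu(irq, cpu);
 */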