/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		irq_settings_clr_and_set(&desc[i], ~0, _IRQ_DEFAULT_INIT_FLAGS);
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

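/*
 * Illustrative sketch, not part of the original file: the typical caller of
 * generic_handle_irq() is a chained/demultiplexing flow handler that reads
 * some controller status register and invokes the handler of each pending
 * child interrupt. The foo_* names and the register layout below are made-up
 * assumptions for the example.
 *
 *	static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = readl(foo_base + FOO_IRQ_STATUS);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, FOO_NR_CHILD_IRQS)
 *			generic_handle_irq(foo_first_irq + bit);
 *	}
 */
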
/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_alloc_descs);

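/*
 * Illustrative sketch, not part of the original file: typical use of the
 * allocation API from an interrupt controller or driver setup path. The
 * count of 4 and the use of numa_node_id() are assumptions for the example.
 *
 * Allocate four consecutive descriptors, searching upwards from 0, on the
 * local node, and release them again on teardown:
 *
 *	int virq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;
 *	...
 *	irq_free_descs(virq, 4);
 */
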
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

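/*
 * Illustrative sketch, not part of the original file: irq_get_next_irq() is
 * the building block of the for_each_active_irq() iterator in
 * <linux/irqnr.h>, so walking all allocated interrupts looks roughly like:
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		do_something_with(irq);
 *
 * where do_something_with() stands in for the caller's per-irq work.
 */
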
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

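/*
 * Illustrative sketch, not part of the original file: these two helpers are
 * used as a pair around short descriptor manipulations, roughly:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = __irq_get_desc_lock(irq, &flags, false);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	... modify the descriptor under desc->lock ...
 *	__irq_put_desc_unlock(desc, flags, false);
 *
 * Passing bus == true additionally brackets the section with
 * chip_bus_lock()/chip_bus_sync_unlock() for chips that sit behind a slow bus.
 */
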
/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

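/*
 * Illustrative sketch, not part of the original file: kstat_irqs_cpu() is
 * the per-cpu counter that interrupt accounting such as /proc/interrupts is
 * built from, so a per-line total can be computed as:
 *
 *	unsigned int cpu, total = 0;
 *
 *	for_each_online_cpu(cpu)
 *		total += kstat_irqs_cpu(irq, cpu);
 *
 * kstat_irqs() above performs the same summation over all possible cpus.
 */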