/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags,
						     IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

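/*
 * Illustrative sketch (not part of this file): a hypothetical board or
 * driver setup path configuring the trigger type of a line it is about to
 * use. "example_irq" is an assumed, already-mapped Linux interrupt number.
 */
#if 0
static int example_configure_line(unsigned int example_irq)
{
	/* Request rising-edge triggering; returns 0 or a negative errno. */
	return irq_set_irq_type(example_irq, IRQ_TYPE_EDGE_RISING);
}
#endif
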
/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags,
						  IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

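/*
 * Illustrative sketch (not part of this file): how a hypothetical irqchip
 * driver might pair irq_set_chip_data() with irq_data_get_irq_chip_data()
 * in one of its callbacks. All "example_*" names and the register offset
 * are made up for the example.
 */
#if 0
struct example_intc {
	void __iomem *base;		/* controller register window */
};

static void example_irq_mask(struct irq_data *d)
{
	struct example_intc *intc = irq_data_get_irq_chip_data(d);

	/* hypothetical mask-set register at offset 0x10 */
	writel(BIT(d->hwirq), intc->base + 0x10);
}

static void example_map_one(unsigned int irq, struct example_intc *intc,
			    struct irq_chip *chip)
{
	irq_set_chip_data(irq, intc);
	irq_set_chip_and_handler(irq, chip, handle_level_irq);
}
#endif
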
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/**
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

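/*
 * Illustrative sketch (not part of this file): handle_nested_irq() is meant
 * to be called from the threaded handler of a parent interrupt, e.g. an I2C
 * GPIO expander whose status register can only be read in sleepable context.
 * The "example_*" names, fields and helpers below are assumptions.
 */
#if 0
static irqreturn_t example_expander_thread_fn(int irq, void *dev_id)
{
	struct example_expander *exp = dev_id;
	unsigned long status = example_read_status(exp);	/* may sleep */
	int bit;

	for_each_set_bit(bit, &status, exp->nr_lines)
		handle_nested_irq(irq_find_mapping(exp->domain, bit));

	return IRQ_HANDLED;
}
#endif
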
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and unmasking
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

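/*
 * Illustrative sketch (not part of this file): a chained flow handler for a
 * hypothetical secondary interrupt controller. It forwards each pending
 * child line with generic_handle_irq(), which runs whichever flow handler
 * (handle_edge_irq, handle_level_irq, ...) was installed for that child.
 * The "example_*" names and helpers are assumptions.
 */
#if 0
static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct example_intc *intc = irq_desc_get_handler_data(desc);
	u32 pending;

	chained_irq_enter(chip, desc);

	pending = example_read_pending(intc);
	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(irq_find_mapping(intc->domain, bit));
		pending &= ~BIT(bit);
	}

	chained_irq_exit(chip, desc);
}
#endif
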
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called.
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

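/*
 * Illustrative sketch (not part of this file): per-CPU device-id interrupts
 * are requested once with request_percpu_irq() and then enabled on each CPU
 * that should receive them with enable_percpu_irq(). The per-CPU state type,
 * "example_timer_handler" and the other "example_*" names are assumptions.
 */
#if 0
static DEFINE_PER_CPU(struct example_percpu_state, example_state);

static int example_setup_timer_irq(unsigned int irq)
{
	int err;

	err = request_percpu_irq(irq, example_timer_handler, "example-timer",
				 &example_state);
	if (err)
		return err;

	/* called on each CPU that should take this interrupt */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif
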
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

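/*
 * Illustrative sketch (not part of this file): a typical caller is an
 * irq_domain map callback, which installs the chip, the flow handler and
 * the chip data for each freshly mapped child line. The "example_*" names
 * and the embedded irq_chip field are assumptions.
 */
#if 0
static int example_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct example_intc *intc = d->host_data;

	irq_set_chip_data(virq, intc);
	irq_set_chip_and_handler_name(virq, &intc->chip, handle_level_irq,
				      "example");
	return 0;
}
#endif
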
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);

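/*
 * Illustrative sketch (not part of this file): most callers use the
 * irq_set_status_flags()/irq_clear_status_flags() wrappers from
 * <linux/irq.h>, which funnel into irq_modify_status(). The particular
 * flag chosen below (keep the line disabled until explicitly enabled)
 * is only an example.
 */
#if 0
static void example_mark_noautoen(unsigned int irq)
{
	/* equivalent to irq_modify_status(irq, 0, IRQ_NOAUTOEN) */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
}
#endif
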
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}