// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>

/* When epoll triggers we do not know why it did so;
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type
 */

struct irq_reg {
	void *id;
	int irq;
	/* it's cheaper to store this than to query it */
	int events;
	bool active;
	bool pending;
	bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	bool pending_event;
	void (*timetravel_handler)(int, int, void *,
				   struct time_travel_event *);
	struct time_travel_event event;
#endif
};

struct irq_entry {
	struct list_head list;
	int fd;
	struct irq_reg reg[NUM_IRQ_TYPES];
	bool suspended;
	bool sigio_workaround;
};

static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static bool irqs_pending;
#endif

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */
	if (irq->active) {
		irq->active = false;

		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending);

		irq->active = true;
	} else {
		irq->pending = true;
	}
}

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
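/*
 * Invoked by the time-travel framework when a scheduled IRQ event
 * fires: run the interrupt now, or just mark it pending if interrupts
 * are currently suspended.
 */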
static void irq_event_handler(struct time_travel_event *ev)
{
	struct irq_reg *reg = container_of(ev, struct irq_reg, event);

	/* do nothing if suspended; just cause a wakeup and mark as pending */
	if (irqs_suspended) {
		irqs_pending = true;
		reg->pending_event = true;
		return;
	}

	generic_handle_irq(reg->irq);
}
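
/*
 * Give the registered timetravel_handler for this IRQ type a chance to
 * process the event. Returns true when a time-travel event is now
 * pending for it, so the normal IRQ path should be skipped and delivery
 * happens later via irq_event_handler().
 */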
static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->timetravel_handler)
		return false;

	/*
	 * Handle all messages - we might get multiple even while
	 * interrupts are already suspended, due to suspend order
	 * etc. Note that time_travel_add_irq_event() will not add
	 * an event twice, if it's pending already "first wins".
	 */
	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

	if (!reg->event.pending)
		return false;

	return true;
}
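
/*
 * Deliver IRQs that were flagged as pending while the system was
 * suspended (see irq_event_handler() above).
 */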
static void irq_do_pending_events(bool timetravel_handlers_only)
{
	struct irq_entry *entry;

	if (!irqs_pending || timetravel_handlers_only)
		return;

	irqs_pending = false;

	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			struct irq_reg *reg = &entry->reg[t];

			/*
			 * Any timetravel_handler was invoked already, just
			 * directly run the IRQ.
			 */
			if (reg->pending_event) {
				irq_enter();
				generic_handle_irq(reg->irq);
				irq_exit();
				reg->pending_event = false;
			}
		}
	}
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	return false;
}

static void irq_do_pending_events(bool timetravel_handlers_only)
{
}
#endif
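
/*
 * Handle one epoll slot for one IRQ type: check that the fd really
 * triggered for this type, give the time-travel handler a chance, and
 * otherwise run the normal IRQ loop.
 */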
static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
			      struct uml_pt_regs *regs,
			      bool timetravel_handlers_only)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->events)
		return;

	if (os_epoll_triggered(idx, reg->events) <= 0)
		return;

	if (irq_do_timetravel_handler(entry, t))
		return;

	/*
	 * If we're called to only run time-travel handlers then don't
	 * actually proceed but mark sigio as pending (if applicable).
	 * For suspend/resume, timetravel_handlers_only may be true
	 * despite time-travel not being configured and used.
	 */
	if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
		reg->pending_event = true;
		irqs_pending = true;
		mark_sigio_pending();
#endif
		return;
	}

	irq_io_loop(reg, regs);
}
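
/*
 * Core SIGIO processing: drain the epoll instance and dispatch each
 * triggered fd/IRQ-type pair. With timetravel_handlers_only set, only
 * the time-travel handlers run and everything else is left pending.
 */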
static void _sigio_handler(struct uml_pt_regs *regs,
			   bool timetravel_handlers_only)
{
	struct irq_entry *irq_entry;
	int n, i;

	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
		return;

	/* Flush out pending events that were ignored due to time-travel. */
	if (!irqs_suspended)
		irq_do_pending_events(timetravel_handlers_only);

	while (1) {
		/* This is now lockless - epoll keeps back-references to the irqs
		 * which have triggered it so there is no need to walk the irq
		 * list and lock it every time. We avoid locking by turning off
		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
		 * we do any changes to the actual data structures
		 */
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n; i++) {
			enum um_irq_type t;

			irq_entry = os_epoll_get_data_pointer(i);

			for (t = 0; t < NUM_IRQ_TYPES; t++)
				sigio_reg_handler(i, irq_entry, t, regs,
						  timetravel_handlers_only);
		}
	}

	if (!timetravel_handlers_only)

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	_sigio_handler(regs, irqs_suspended);
}
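
/* Look up the irq_entry for an fd; the caller must hold irq_lock. */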
static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk;

	lockdep_assert_held(&irq_lock);

	list_for_each_entry(walk, &active_fds, list) {
		if (walk->fd == fd)
			return walk;
	}

	return NULL;
}

static void free_irq_entry(struct irq_entry *to_free, bool remove)
{
	if (!to_free)
		return;

	if (remove) {
		os_del_epoll_fd(to_free->fd);
		list_del(&to_free->list);
	}
	kfree(to_free);
}
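
/*
 * Recompute the epoll event mask for this entry from all IRQ types;
 * returns false (and drops the fd from epoll) if nothing is left.
 */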
static bool update_irq_entry(struct irq_entry *entry)
{
	enum um_irq_type i;
	int events = 0;

	for (i = 0; i < NUM_IRQ_TYPES; i++)
		events |= entry->reg[i].events;

	if (events) {
		/* will modify (instead of add) if needed */
		os_add_epoll_fd(events, entry->fd, entry);
		return true;
	}

	os_del_epoll_fd(entry->fd);
	return false;
}

static void update_or_free_irq_entry(struct irq_entry *entry)
{
	if (!update_irq_entry(entry))
		free_irq_entry(entry, false);
}
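
/*
 * Register (or extend) the irq_entry for an fd and arm it in the epoll
 * loop for the requested IRQ type.
 */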
static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
		       void (*timetravel_handler)(int, int, void *,
						  struct time_travel_event *))
{
	struct irq_entry *irq_entry;
	int err, events = os_event_mask(type);
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);
	irq_entry = get_irq_entry_by_fd(fd);
	if (irq_entry) {
		/* cannot register the same FD twice with the same type */
		if (WARN_ON(irq_entry->reg[type].events)) {
			err = -EALREADY;
			goto out_unlock;
		}

		/* temporarily disable to avoid IRQ-side locking */
		os_del_epoll_fd(fd);
	} else {
		irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
		if (!irq_entry) {
			err = -ENOMEM;
			goto out_unlock;
		}
		irq_entry->fd = fd;
		list_add_tail(&irq_entry->list, &active_fds);
		maybe_sigio_broken(fd);
	}

	irq_entry->reg[type].id = dev_id;
	irq_entry->reg[type].irq = irq;
	irq_entry->reg[type].active = true;
	irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	if (um_irq_timetravel_handler_used()) {
		irq_entry->reg[type].timetravel_handler = timetravel_handler;
		irq_entry->reg[type].event.fn = irq_event_handler;
	}
#endif

	WARN_ON(!update_irq_entry(irq_entry));
	spin_unlock_irqrestore(&irq_lock, flags);

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}

/*
 * Remove the entry or entries for a specific FD, if you
 * don't want to remove all the possible entries then use
 * um_free_irq() or deactivate_fd() instead.
 */
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	free_irq_entry(to_free, true);
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);
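
/* Drop the single registration matching both the IRQ number and dev_id. */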
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type i;

		for (i = 0; i < NUM_IRQ_TYPES; i++) {
			struct irq_reg *reg = &entry->reg[i];

			if (!reg->events)
				continue;
			if (reg->irq != irq)
				continue;
			if (reg->id != dev)
				continue;

			os_del_epoll_fd(entry->fd);
			reg->events = 0;
			update_or_free_irq_entry(entry);
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(&irq_lock, flags);
}
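
/*
 * Disable the given IRQ number on this fd without freeing other
 * registrations that may share the fd.
 */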
void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *entry;
	unsigned long flags;
	enum um_irq_type i;

	os_del_epoll_fd(fd);

	spin_lock_irqsave(&irq_lock, flags);
	entry = get_irq_entry_by_fd(fd);
	if (!entry)
		goto out;

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		if (!entry->reg[i].events)
			continue;
		if (entry->reg[i].irq == irqnum)
			entry->reg[i].events = 0;
	}

	update_or_free_irq_entry(entry);
out:
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting. No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_entry *entry;

	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */
	os_set_ioignore();

	/* we can no longer call kfree() here so just deactivate */
	list_for_each_entry(entry, &active_fds, list)
		os_del_epoll_fd(entry->fd);
	os_close_epoll_fd();
	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
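
/* Release an IRQ previously set up with um_request_irq(). */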
void um_free_irq(int irq, void *dev)
{
	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
		 "freeing invalid irq %d", irq))
		return;

	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
	clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id,
		void (*timetravel_handler)(int, int, void *,
					   struct time_travel_event *))
{
	int err;

	if (irq == UM_IRQ_ALLOC) {
		int i;

		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
			if (!test_and_set_bit(i, irqs_allocated)) {
				irq = i;
				break;
			}
		}
	}

	if (irq < 0)
		return -ENOSPC;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
		if (err)
			goto error;
	}

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err < 0)
		goto error;

	return irq;
error:
	clear_bit(irq, irqs_allocated);
	return err;
}

int um_request_irq(int irq, int fd, enum um_irq_type type,
		   irq_handler_t handler, unsigned long irqflags,
		   const char *devname, void *dev_id)
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);
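
/*
 * Example (illustrative only, not part of this file): a driver with a
 * hypothetical file descriptor "fd", device pointer "my_dev" and handler
 * "my_intr" would typically allocate a dynamic IRQ and attach it to the
 * fd roughly like this:
 *
 *	static irqreturn_t my_intr(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, my_intr,
 *				 0, "my-device", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	um_free_irq(irq, my_dev);
 */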

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
		      irq_handler_t handler, unsigned long irqflags,
		      const char *devname, void *dev_id,
		      void (*timetravel_handler)(int, int, void *,
						 struct time_travel_event *))
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);

void sigio_run_timetravel_handlers(void)
{
	_sigio_handler(NULL, true);
}
#endif

#ifdef CONFIG_PM_SLEEP
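/*
 * On suspend, stop async IO on every fd that is not needed for wakeup,
 * so a stray SIGIO cannot disturb the suspended state; um_irqs_resume()
 * below undoes this.
 */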
void um_irqs_suspend(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	irqs_suspended = true;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;
		bool clear = true;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			/*
			 * For the SIGIO_WRITE_IRQ, which is used to handle the
			 * SIGIO workaround thread, we need special handling:
			 * enable wake for it itself, but below we tell it about
			 * any FDs that should be suspended.
			 */
			if (entry->reg[t].wakeup ||
			    entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
			    || entry->reg[t].timetravel_handler
#endif
			    ) {
				clear = false;
				break;
			}
		}

		if (clear) {
			entry->suspended = true;
			os_clear_fd_async(entry->fd);
			entry->sigio_workaround =
				!__ignore_sigio_fd(entry->fd);
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);
}

void um_irqs_resume(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		if (entry->suspended) {
			int err = os_set_fd_async(entry->fd);

			WARN(err < 0, "os_set_fd_async returned %d\n", err);
			entry->suspended = false;

			if (entry->sigio_workaround) {
				err = __add_sigio_fd(entry->fd);
				WARN(err < 0, "__add_sigio_fd returned %d\n", err);
			}
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);

	irqs_suspended = false;
	send_sigio_to_self();
}
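
/* irq_set_wake callback: record which registrations may wake the system. */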
static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			if (entry->reg[t].irq != d->irq)
				continue;
			entry->reg[t].wakeup = on;
			goto unlock;
		}
	}
unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
	return 0;
}
#else
#define normal_irq_set_wake NULL
#endif

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
	.irq_set_wake = normal_irq_set_wake,
};

static struct irq_chip alarm_irq_type = {
	.name = "SIGALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation. We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack. The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other? UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled. If the value is
 * non-zero, then there is stack setup in progress. In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one. As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit). This is the
 * nesting indicator. If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in. So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}