kernel/irq/cpuhotplug.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"
/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips,
	 * which do not implement effective affinity, but the architecture has
	 * enabled the config switch. Use the general affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}
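
/*
 * Worked example of the sanity check above (illustrative; not part of
 * the original file). Assume CPUs 0-3, with CPU 3 going offline, so
 * cpu_online_mask has already been reduced to {0,1,2}:
 *
 *  - m = {3}:   cpumask_any_but(m, 3) finds no other CPU, so the check
 *               is skipped; cpumask_test_cpu(3, m) returns true and the
 *               interrupt gets fixed up.
 *  - m = {2,3}: cpumask_any_and(m, cpu_online_mask) = 2, so the mask is
 *               sane; fixup still happens because CPU 3 is in m.
 *  - m = {3,4}, with CPU 4 already offline: cpumask_any_but(m, 3) = 4
 *               but no CPU in m is online, so a missed fixup from an
 *               earlier offlining is reported and enforced.
 */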

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. It can't wait until this interrupt actually
	 * happens and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}
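
/*
 * Illustrative note (not part of the original file): for managed
 * interrupts, the shutdown path above pairs with the online path
 * further down. When the last CPU of a managed affinity mask goes
 * down:
 *
 *	migrate_one_irq()
 *	  -> irqd_set_managed_shutdown(d);
 *	  -> irq_shutdown_and_deactivate(desc);
 *
 * and when a CPU of that mask comes back online:
 *
 *	irq_affinity_online_cpu()
 *	  -> irq_restore_affinity_of_irq()
 *	       -> irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
 */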

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}
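
/*
 * Example caller (illustrative; this lives in architecture code such
 * as arch/arm64/kernel/smp.c, not in this file): the function above is
 * invoked from __cpu_disable() on the dying CPU, after it has been
 * cleared from cpu_online_mask and with interrupts disabled:
 *
 *	int __cpu_disable(void)
 *	{
 *		...
 *		set_cpu_online(smp_processor_id(), false);
 *		...
 *		irq_migrate_all_off_this_cpu();
 *		return 0;
 *	}
 */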

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
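
/*
 * Example registration (illustrative; this table entry lives in
 * kernel/cpu.c, not in this file): irq_affinity_online_cpu() is hooked
 * into the CPU hotplug state machine so that it runs on the upcoming
 * CPU while it is being brought online:
 *
 *	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
 *		.name			= "irq/affinity:online",
 *		.startup.single		= irq_affinity_online_cpu,
 *		.teardown.single	= NULL,
 *	},
 */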