/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif	/* CONFIG_GENERIC_SMP_IDLE_THREAD */

#endif /* #ifdef CONFIG_SMP */
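
/*
 * Illustrative sketch (not part of the kernel's smpboot.c): the CPU hotplug
 * core is the consumer of idle_thread_get() above; bringing a CPU up roughly
 * means fetching its cached idle task and handing it to the architecture.
 * example_bringup_cpu() is a hypothetical, simplified stand-in for that step.
 */
static int example_bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);

	if (IS_ERR(idle))
		return PTR_ERR(idle);
	/* __cpu_up() starts the CPU and has it enter this idle task. */
	return __cpu_up(cpu, idle);
}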

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread should exit.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
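
/*
 * Illustrative sketch (not part of the kernel's smpboot.c): a minimal client
 * of the callbacks that smpboot_thread_fn() drives.  All "foo_*" names and
 * the per-CPU pending flag are hypothetical; only struct smp_hotplug_thread
 * and its hooks come from <linux/smpboot.h>.
 */
static DEFINE_PER_CPU(struct task_struct *, foo_thread);
static DEFINE_PER_CPU(unsigned long, foo_pending);

static int foo_should_run(unsigned int cpu)
{
	/* smpboot_thread_fn() puts the thread to sleep when this returns 0. */
	return this_cpu_read(foo_pending) != 0;
}

static void foo_thread_fn(unsigned int cpu)
{
	/* Invoked in TASK_RUNNING state, with the thread bound to @cpu. */
	this_cpu_write(foo_pending, 0);
}

static void foo_park(unsigned int cpu)
{
	/* Called before the thread parks when @cpu is going down. */
}

static struct smp_hotplug_thread foo_threads = {
	.store			= &foo_thread,
	.thread_should_run	= foo_should_run,
	.thread_fn		= foo_thread_fn,
	.park			= foo_park,
	.thread_comm		= "foo/%u",
};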

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	/*
	 * Park the thread so that it could start right on the CPU
	 * when it is available.
	 */
	kthread_park(tsk);
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (!ht->selfparking)
		kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		if (cpumask_test_cpu(cpu, cur->cpumask))
			smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy also the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
 *					    to hotplug
 * @plug_thread:	Hotplug thread descriptor
 * @cpumask:		The cpumask where threads run
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
					   const struct cpumask *cpumask)
{
	unsigned int cpu;
	int ret = 0;

	if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(plug_thread->cpumask, cpumask);

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			free_cpumask_var(plug_thread->cpumask);
			goto out;
		}
		if (cpumask_test_cpu(cpu, cpumask))
			smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
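
/*
 * Illustrative usage sketch (not part of the kernel's smpboot.c): registering
 * and unregistering the hypothetical "foo" descriptor from the earlier sketch
 * so that a "foo/N" kthread exists for every possible CPU.  foo_threads is an
 * assumption carried over from that sketch.
 */
static int __init foo_threads_init(void)
{
	/* Threads for offline CPUs are created but left parked. */
	return smpboot_register_percpu_thread_cpumask(&foo_threads,
						      cpu_possible_mask);
}

static void __exit foo_threads_exit(void)
{
	/* Stops and releases the kthreads on all possible CPUs. */
	smpboot_unregister_percpu_thread(&foo_threads);
}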

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	free_cpumask_var(plug_thread->cpumask);
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

/**
 * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
 * @plug_thread:	Hotplug thread descriptor
 * @new:		Revised cpumask to use
 *
 * The cpumask field in the smp_hotplug_thread must not be updated directly
 * by the client, but only by calling this function.
 * This function can only be called on a registered smp_hotplug_thread.
 */
void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
					  const struct cpumask *new)
{
	struct cpumask *old = plug_thread->cpumask;
	static struct cpumask tmp;
	unsigned int cpu;

	lockdep_assert_cpus_held();
	mutex_lock(&smpboot_threads_lock);

	/* Park threads that were exclusively enabled on the old mask. */
	cpumask_andnot(&tmp, old, new);
	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
		smpboot_park_thread(plug_thread, cpu);

	/* Unpark threads that are exclusively enabled on the new mask. */
	cpumask_andnot(&tmp, new, old);
	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
		smpboot_unpark_thread(plug_thread, cpu);

	cpumask_copy(old, new);

	mutex_unlock(&smpboot_threads_lock);
}
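
/*
 * Illustrative usage sketch (not part of the kernel's smpboot.c): restricting
 * the hypothetical "foo" threads to CPU 0.  The caller must hold the CPU
 * hotplug lock, as lockdep_assert_cpus_held() above requires.  foo_threads is
 * an assumption carried over from the earlier sketch.
 */
static void foo_restrict_to_cpu0(void)
{
	cpus_read_lock();
	/* Threads on every other online CPU are parked here. */
	smpboot_update_cpumask_percpu_thread(&foo_threads, cpumask_of(0));
	cpus_read_unlock();
}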

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success.  Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out.  And yet otherwise again, return -EAGAIN
 * if cpu_wait_death() timed out and the CPU still hasn't gotten around
 * to dying.  In the latter two cases, the CPU might not be set up
 * properly, but it is up to the arch-specific code to decide.
 * Finally, -EIO indicates an unanticipated problem.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;
	}

	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

	case CPU_POST_DEAD:

		/* The CPU died properly, so just start it up again. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;

	case CPU_DEAD_FROZEN:

		/*
		 * Timeout during CPU death, so let caller know.
		 * The outgoing CPU completed its processing, but after
		 * cpu_wait_death() timed out and reported the error. The
		 * caller is free to proceed, in which case the state
		 * will be reset properly by cpu_set_state_online().
		 * Proceeding despite this -EBUSY return makes sense
		 * for systems where the outgoing CPUs take themselves
		 * offline, with no post-death manipulation required from
		 * a surviving CPU.
		 */
		return -EBUSY;

	case CPU_BROKEN:

		/*
		 * The most likely reason we got here is that there was
		 * a timeout during CPU death, and the outgoing CPU never
		 * did complete its processing.  This could happen on
		 * a virtualized system if the outgoing VCPU gets preempted
		 * for more than five seconds, and the user attempts to
		 * immediately online that same CPU.  Trying again later
		 * might return -EBUSY above, hence -EAGAIN.
		 */
		return -EAGAIN;

	default:

		/* Should not happen.  Famous last words. */
		return -EIO;
	}
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}
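
/*
 * Illustrative sketch (not part of the kernel's smpboot.c): how architecture
 * bringup code might drive the state machine above.  arch_boot_secondary()
 * and example_secondary_start() are hypothetical stand-ins for the
 * architecture's low-level hooks.
 */
extern int arch_boot_secondary(unsigned int cpu);	/* hypothetical */

static int example_cpu_up(unsigned int cpu)
{
	int ret;

	/* Controlling CPU: refuse a CPU whose previous offline went wrong. */
	ret = cpu_check_up_prepare(cpu);
	if (ret)
		return ret;

	return arch_boot_secondary(cpu);
}

static void example_secondary_start(void)
{
	/* Incoming CPU: report that it is up before entering the idle loop. */
	cpu_set_state_online(smp_processor_id());
}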

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				   oldstate, CPU_BROKEN) != oldstate)
			goto update_state;
		ret = false;
	}
	return ret;
}

/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" state is used when the surviving CPU
 * timed out.  This approach allows architectures to omit calls to
 * cpu_check_up_prepare() and cpu_set_state_online() without defeating
 * the next cpu_wait_death()'s polling loop.
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();

	do {
		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				oldstate, newstate) != oldstate);
	return newstate == CPU_DEAD;
}
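
/*
 * Illustrative sketch (not part of the kernel's smpboot.c): how the two sides
 * of an offline operation might pair these helpers.  arch_cpu_power_down() is
 * a hypothetical stand-in for the architecture's final power-down step.
 */
extern void arch_cpu_power_down(void);	/* hypothetical */

static void example_cpu_die(void)
{
	/* Outgoing CPU: tell the survivor we are done, then power down. */
	(void)cpu_report_death();
	arch_cpu_power_down();
}

static void example_wait_for_dead_cpu(unsigned int cpu)
{
	/* Surviving CPU: give the outgoing CPU up to 5 seconds to report in. */
	if (!cpu_wait_death(cpu, 5))
		pr_err("CPU %u did not die\n", cpu);
}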

#endif /* #ifdef CONFIG_HOTPLUG_CPU */