/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
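/*
 * A minimal sketch of the registration protocol described above; the
 * example_* names are hypothetical and not part of this file. Holding
 * cpu_notifier_register_begin()/done() keeps the online mask stable, so
 * initializing state for the already-online CPUs and registering the
 * callback with __register_cpu_notifier() cannot race with a hotplug
 * operation.
 */
#if 0	/* illustrative example only */
static int example_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                pr_info("example: CPU%u is online\n", cpu);
                break;
        case CPU_DOWN_PREPARE:
                pr_info("example: CPU%u is about to go down\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
        .notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
        unsigned int cpu;

        cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                example_cpu_callback(&example_cpu_notifier, CPU_ONLINE,
                                     (void *)(long)cpu);
        __register_cpu_notifier(&example_cpu_notifier);
        cpu_notifier_register_done();
        return 0;
}
#endif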
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return true;
        if (!mutex_trylock(&cpu_hotplug.lock))
                return false;
        cpuhp_lock_acquire_tryread();
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
        return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
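/*
 * A minimal sketch of the reader side; example_walk_online_cpus() is
 * hypothetical and not part of this file. Any code that must see a stable
 * set of online CPUs brackets the work with get_online_cpus() and
 * put_online_cpus(), which holds off cpu_hotplug_begin() writers for the
 * duration.
 */
#if 0	/* illustrative example only */
static void example_walk_online_cpus(void)
{
        unsigned int cpu;

        get_online_cpus();      /* blocks any hotplug writer */
        for_each_online_cpu(cpu)
                pr_info("example: CPU%u is online\n", cpu);
        put_online_cpus();      /* may wake a waiting writer */
}
#endif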
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: unplugged cpu
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so that we do not warn about a task
                 * that was running on this cpu in the past and has just
                 * been woken on another cpu.
                 */
                smp_rmb();

                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        } while_each_thread(g, p);
        read_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so explicitly call both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
#ifdef CONFIG_PREEMPT
        synchronize_sched();
#endif
        synchronize_rcu();

        smpboot_park_threads(cpu);

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per cpu threads */
        smpboot_unpark_threads(cpu);

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
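/*
 * A minimal sketch of the caller side; example_cycle_cpu() is hypothetical
 * and not part of this file. The sysfs 'online' attribute in
 * drivers/base/cpu.c drives these entry points the same way; both return
 * -EBUSY while cpu_hotplug_disabled is set.
 */
#if 0	/* illustrative example only */
static int example_cycle_cpu(unsigned int cpu)
{
        int err;

        err = cpu_down(cpu);    /* needs CONFIG_HOTPLUG_CPU */
        if (err)
                return err;
        return cpu_up(cpu);
}
#endif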
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                pr_err("Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
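/*
 * How cpumask_of() consumes the table above: get_cpu_mask() (shown here
 * for illustration; the real definition lives in <linux/cpumask.h>) picks
 * the row whose word 0 has bit (cpu % BITS_PER_LONG) set, then backs the
 * pointer up by cpu / BITS_PER_LONG words. The empty row 0 and the
 * zero-filled tails of the preceding rows guarantee that every other word
 * of the resulting mask reads as zero, so the only bit set is 'cpu'.
 */
#if 0	/* illustrative copy - see <linux/cpumask.h> */
static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
        p -= cpu / BITS_PER_LONG;
        return to_cpumask(p);
}
#endif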
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
        }
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}