/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
static RAW_NOTIFIER_HEAD(cpu_chain);
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	/* Wait for the readers to drain; new readers block on the mutex. */
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}
static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}
static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
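/*
 * Caller sketch (illustrative only): offlining CPU 2 from kernel code,
 * e.g. in response to a sysfs write, reduces to:
 *
 *	int err = cpu_down(2);
 *	if (err)
 *		pr_err("Failed to offline CPU2: %d\n", err);
 *
 * err is -EBUSY if hotplug is disabled or CPU 2 is the last online CPU,
 * and -EINVAL if CPU 2 is already offline.
 */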
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}
int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
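/*
 * Caller sketch (illustrative only): onlining a CPU is the mirror image,
 * and is essentially what a write of "1" to
 * /sys/devices/system/cpu/cpuN/online ends up doing:
 *
 *	int err = cpu_up(2);
 *	if (err)
 *		pr_err("Failed to online CPU2: %d\n", err);
 */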
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
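/*
 * Pairing sketch (illustrative only): the suspend core brackets the
 * single-CPU phase of suspend with these two calls, roughly:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... suspend and resume work runs on the boot CPU ...
 *		enable_nonboot_cpus();
 *	}
 */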
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 *
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}
/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable_before_freeze();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable_after_thaw();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86's
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * having already disabled cpu hotplug, avoiding a hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
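/*
 * Arch-side sketch (illustrative only; secondary_start_kernel() stands in
 * for an architecture's secondary-CPU entry point, and details vary per
 * arch): the required ordering is notify first, then mark online, then
 * enable interrupts:
 *
 *	void secondary_start_kernel(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		... low-level per-cpu setup ...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		... enter the idle loop ...
 *	}
 */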
#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents every NR_CPUS-bit binary value of the form 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
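/*
 * How cpumask_of() decompresses this (a sketch of the logic of
 * get_cpu_mask() in include/linux/cpumask.h): pick the row whose word 0
 * has bit (cpu % BITS_PER_LONG) set, then step the pointer back by
 * (cpu / BITS_PER_LONG) words:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * The resulting NR_CPUS-bit mask has only bit 'cpu' set: the word at
 * index cpu / BITS_PER_LONG is the chosen row's word 0, and every other
 * word covered by the mask is a zero word of a neighbouring row
 * (row 0 is left empty so there is always room to back into it).
 */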
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);
static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);
static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);
static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}
void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}
void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}
void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}
void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}
void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}