1 /* CPU control.
2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
4 * This code is licensed under the GPL.
5 */
6 #include <linux/proc_fs.h>
7 #include <linux/smp.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sched/hotplug.h>
12 #include <linux/sched/task.h>
13 #include <linux/unistd.h>
14 #include <linux/cpu.h>
15 #include <linux/oom.h>
16 #include <linux/rcupdate.h>
17 #include <linux/export.h>
18 #include <linux/bug.h>
19 #include <linux/kthread.h>
20 #include <linux/stop_machine.h>
21 #include <linux/mutex.h>
22 #include <linux/gfp.h>
23 #include <linux/suspend.h>
24 #include <linux/lockdep.h>
25 #include <linux/tick.h>
26 #include <linux/irq.h>
27 #include <linux/nmi.h>
28 #include <linux/smpboot.h>
29 #include <linux/relay.h>
30 #include <linux/slab.h>
31 #include <linux/percpu-rwsem.h>
33 #include <trace/events/power.h>
34 #define CREATE_TRACE_POINTS
35 #include <trace/events/cpuhp.h>
37 #include "smpboot.h"
39 /**
40 * cpuhp_cpu_state - Per cpu hotplug state storage
41 * @state: The current cpu state
42 * @target: The target state
43 * @thread: Pointer to the hotplug thread
44 * @should_run: Thread should execute
45 * @rollback: Perform a rollback
46 * @single: Single callback invocation
47 * @bringup: Single callback bringup or teardown selector
48 * @cb_state: The state for a single callback (install/uninstall)
49 * @result: Result of the operation
50 * @done_up: Signal completion to the issuer of the task for cpu-up
51 * @done_down: Signal completion to the issuer of the task for cpu-down
53 struct cpuhp_cpu_state {
54 enum cpuhp_state state;
55 enum cpuhp_state target;
56 enum cpuhp_state fail;
57 #ifdef CONFIG_SMP
58 struct task_struct *thread;
59 bool should_run;
60 bool rollback;
61 bool single;
62 bool bringup;
63 struct hlist_node *node;
64 struct hlist_node *last;
65 enum cpuhp_state cb_state;
66 int result;
67 struct completion done_up;
68 struct completion done_down;
69 #endif
72 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
73 .fail = CPUHP_INVALID,
76 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
77 static struct lockdep_map cpuhp_state_up_map =
78 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
79 static struct lockdep_map cpuhp_state_down_map =
80 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
83 static inline void cpuhp_lock_acquire(bool bringup)
85 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
88 static inline void cpuhp_lock_release(bool bringup)
90 lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
92 #else
94 static inline void cpuhp_lock_acquire(bool bringup) { }
95 static inline void cpuhp_lock_release(bool bringup) { }
97 #endif
99 /**
100 * cpuhp_step - Hotplug state machine step
101 * @name: Name of the step
102 * @startup: Startup function of the step
103 * @teardown: Teardown function of the step
104 * @skip_onerr: Do not invoke the functions on error rollback
105 * Will go away once the notifiers are gone
106 * @cant_stop: Bringup/teardown can't be stopped at this step
108 struct cpuhp_step {
109 const char *name;
110 union {
111 int (*single)(unsigned int cpu);
112 int (*multi)(unsigned int cpu,
113 struct hlist_node *node);
114 } startup;
115 union {
116 int (*single)(unsigned int cpu);
117 int (*multi)(unsigned int cpu,
118 struct hlist_node *node);
119 } teardown;
120 struct hlist_head list;
121 bool skip_onerr;
122 bool cant_stop;
123 bool multi_instance;
126 static DEFINE_MUTEX(cpuhp_state_mutex);
127 static struct cpuhp_step cpuhp_bp_states[];
128 static struct cpuhp_step cpuhp_ap_states[];
130 static bool cpuhp_is_ap_state(enum cpuhp_state state)
133 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
134 * purposes as that state is handled explicitly in cpu_down.
136 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
139 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
141 struct cpuhp_step *sp;
143 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
144 return sp + state;
148 * cpuhp_invoke_callback - Invoke the callbacks for a given state
149 * @cpu: The cpu for which the callback should be invoked
150 * @state: The state to do callbacks for
151 * @bringup: True if the bringup callback should be invoked
152 * @node: For multi-instance, do a single entry callback for install/remove
153 * @lastp: For multi-instance rollback, remember how far we got
155 * Called from cpu hotplug and from the state register machinery.
157 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
158 bool bringup, struct hlist_node *node,
159 struct hlist_node **lastp)
161 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
162 struct cpuhp_step *step = cpuhp_get_step(state);
163 int (*cbm)(unsigned int cpu, struct hlist_node *node);
164 int (*cb)(unsigned int cpu);
165 int ret, cnt;
167 if (st->fail == state) {
168 st->fail = CPUHP_INVALID;
170 if (!(bringup ? step->startup.single : step->teardown.single))
171 return 0;
173 return -EAGAIN;
176 if (!step->multi_instance) {
177 WARN_ON_ONCE(lastp && *lastp);
178 cb = bringup ? step->startup.single : step->teardown.single;
179 if (!cb)
180 return 0;
181 trace_cpuhp_enter(cpu, st->target, state, cb);
182 ret = cb(cpu);
183 trace_cpuhp_exit(cpu, st->state, state, ret);
184 return ret;
186 cbm = bringup ? step->startup.multi : step->teardown.multi;
187 if (!cbm)
188 return 0;
190 /* Single invocation for instance add/remove */
191 if (node) {
192 WARN_ON_ONCE(lastp && *lastp);
193 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
194 ret = cbm(cpu, node);
195 trace_cpuhp_exit(cpu, st->state, state, ret);
196 return ret;
199 /* State transition. Invoke on all instances */
200 cnt = 0;
201 hlist_for_each(node, &step->list) {
202 if (lastp && node == *lastp)
203 break;
205 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
206 ret = cbm(cpu, node);
207 trace_cpuhp_exit(cpu, st->state, state, ret);
208 if (ret) {
209 if (!lastp)
210 goto err;
212 *lastp = node;
213 return ret;
215 cnt++;
217 if (lastp)
218 *lastp = NULL;
219 return 0;
220 err:
221 /* Rollback the instances if one failed */
222 cbm = !bringup ? step->startup.multi : step->teardown.multi;
223 if (!cbm)
224 return ret;
226 hlist_for_each(node, &step->list) {
227 if (!cnt--)
228 break;
230 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
231 ret = cbm(cpu, node);
232 trace_cpuhp_exit(cpu, st->state, state, ret);
234 * Rollback must not fail,
236 WARN_ON_ONCE(ret);
238 return ret;
241 #ifdef CONFIG_SMP
242 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
244 struct completion *done = bringup ? &st->done_up : &st->done_down;
245 wait_for_completion(done);
248 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
250 struct completion *done = bringup ? &st->done_up : &st->done_down;
251 complete(done);
255 * The former STARTING/DYING states run with IRQs disabled and must not fail.
257 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
259 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
262 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
263 static DEFINE_MUTEX(cpu_add_remove_lock);
264 bool cpuhp_tasks_frozen;
265 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
268 * The following two APIs (cpu_maps_update_begin/done) must be used when
269 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
271 void cpu_maps_update_begin(void)
273 mutex_lock(&cpu_add_remove_lock);
276 void cpu_maps_update_done(void)
278 mutex_unlock(&cpu_add_remove_lock);
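/*
 * Example (illustrative sketch, not part of this file): architecture code
 * that marks a new CPU present serializes the mask update against hotplug
 * with the begin/done pair above. The surrounding function name is made up.
 *
 *	void arch_register_new_cpu(unsigned int cpu)
 *	{
 *		cpu_maps_update_begin();
 *		set_cpu_present(cpu, true);
 *		cpu_maps_update_done();
 *	}
 */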
282 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
283 * Should always be manipulated under cpu_add_remove_lock
285 static int cpu_hotplug_disabled;
287 #ifdef CONFIG_HOTPLUG_CPU
289 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
291 void cpus_read_lock(void)
293 percpu_down_read(&cpu_hotplug_lock);
295 EXPORT_SYMBOL_GPL(cpus_read_lock);
297 void cpus_read_unlock(void)
299 percpu_up_read(&cpu_hotplug_lock);
301 EXPORT_SYMBOL_GPL(cpus_read_unlock);
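/*
 * Example (illustrative sketch, not part of this file): a typical reader
 * pins the set of online CPUs while iterating over it. The function name
 * and the do_poke() callback are placeholders for illustration only.
 *
 *	static void poke_all_online_cpus(void)
 *	{
 *		unsigned int cpu;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			smp_call_function_single(cpu, do_poke, NULL, 1);
 *		cpus_read_unlock();
 *	}
 */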
303 void cpus_write_lock(void)
305 percpu_down_write(&cpu_hotplug_lock);
308 void cpus_write_unlock(void)
310 percpu_up_write(&cpu_hotplug_lock);
313 void lockdep_assert_cpus_held(void)
315 percpu_rwsem_assert_held(&cpu_hotplug_lock);
319 * Wait for currently running CPU hotplug operations to complete (if any) and
320 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
321 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
322 * hotplug path before performing hotplug operations. So acquiring that lock
323 * guarantees mutual exclusion from any currently running hotplug operations.
325 void cpu_hotplug_disable(void)
327 cpu_maps_update_begin();
328 cpu_hotplug_disabled++;
329 cpu_maps_update_done();
331 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
333 static void __cpu_hotplug_enable(void)
335 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
336 return;
337 cpu_hotplug_disabled--;
340 void cpu_hotplug_enable(void)
342 cpu_maps_update_begin();
343 __cpu_hotplug_enable();
344 cpu_maps_update_done();
346 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
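/*
 * Example (illustrative sketch): a caller that must keep the CPU topology
 * stable across a longer, sleepable operation can use the disable/enable
 * pair instead of holding the hotplug lock for the whole time.
 * update_platform_firmware() is a made-up placeholder.
 *
 *	cpu_hotplug_disable();
 *	update_platform_firmware();
 *	cpu_hotplug_enable();
 */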
347 #endif /* CONFIG_HOTPLUG_CPU */
349 static inline enum cpuhp_state
350 cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
352 enum cpuhp_state prev_state = st->state;
354 st->rollback = false;
355 st->last = NULL;
357 st->target = target;
358 st->single = false;
359 st->bringup = st->state < target;
361 return prev_state;
364 static inline void
365 cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
367 st->rollback = true;
370 * If we have st->last we need to undo partial multi_instance of this
371 * state first. Otherwise start undo at the previous state.
373 if (!st->last) {
374 if (st->bringup)
375 st->state--;
376 else
377 st->state++;
380 st->target = prev_state;
381 st->bringup = !st->bringup;
384 /* Regular hotplug invocation of the AP hotplug thread */
385 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
387 if (!st->single && st->state == st->target)
388 return;
390 st->result = 0;
392 * Make sure the above stores are visible before should_run becomes
393 * true. Paired with the mb() above in cpuhp_thread_fun()
395 smp_mb();
396 st->should_run = true;
397 wake_up_process(st->thread);
398 wait_for_ap_thread(st, st->bringup);
401 static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
403 enum cpuhp_state prev_state;
404 int ret;
406 prev_state = cpuhp_set_state(st, target);
407 __cpuhp_kick_ap(st);
408 if ((ret = st->result)) {
409 cpuhp_reset_state(st, prev_state);
410 __cpuhp_kick_ap(st);
413 return ret;
416 static int bringup_wait_for_ap(unsigned int cpu)
418 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
420 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
421 wait_for_ap_thread(st, true);
422 if (WARN_ON_ONCE((!cpu_online(cpu))))
423 return -ECANCELED;
425 /* Unpark the stopper thread and the hotplug thread of the target cpu */
426 stop_machine_unpark(cpu);
427 kthread_unpark(st->thread);
429 if (st->target <= CPUHP_AP_ONLINE_IDLE)
430 return 0;
432 return cpuhp_kick_ap(st, st->target);
435 static int bringup_cpu(unsigned int cpu)
437 struct task_struct *idle = idle_thread_get(cpu);
438 int ret;
441 * Some architectures have to walk the irq descriptors to
442 * set up the vector space for the cpu which comes online.
443 * Prevent irq alloc/free across the bringup.
445 irq_lock_sparse();
447 /* Arch-specific enabling code. */
448 ret = __cpu_up(cpu, idle);
449 irq_unlock_sparse();
450 if (ret)
451 return ret;
452 return bringup_wait_for_ap(cpu);
456 * Hotplug state machine related functions
459 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
461 for (st->state--; st->state > st->target; st->state--) {
462 struct cpuhp_step *step = cpuhp_get_step(st->state);
464 if (!step->skip_onerr)
465 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
469 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
470 enum cpuhp_state target)
472 enum cpuhp_state prev_state = st->state;
473 int ret = 0;
475 while (st->state < target) {
476 st->state++;
477 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
478 if (ret) {
479 st->target = prev_state;
480 undo_cpu_up(cpu, st);
481 break;
484 return ret;
488 * The cpu hotplug threads manage the bringup and teardown of the cpus
490 static void cpuhp_create(unsigned int cpu)
492 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
494 init_completion(&st->done_up);
495 init_completion(&st->done_down);
498 static int cpuhp_should_run(unsigned int cpu)
500 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
502 return st->should_run;
506 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
507 * callbacks when a state gets [un]installed at runtime.
509 * Each invocation of this function by the smpboot thread does a single AP
510 * state callback.
512 * It has 3 modes of operation:
513 * - single: runs st->cb_state
514 * - up: runs ++st->state, while st->state < st->target
515 * - down: runs st->state--, while st->state > st->target
517 * When complete or on error, should_run is cleared and the completion is fired.
519 static void cpuhp_thread_fun(unsigned int cpu)
521 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
522 bool bringup = st->bringup;
523 enum cpuhp_state state;
526 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
527 * that if we see ->should_run we also see the rest of the state.
529 smp_mb();
531 if (WARN_ON_ONCE(!st->should_run))
532 return;
534 cpuhp_lock_acquire(bringup);
536 if (st->single) {
537 state = st->cb_state;
538 st->should_run = false;
539 } else {
540 if (bringup) {
541 st->state++;
542 state = st->state;
543 st->should_run = (st->state < st->target);
544 WARN_ON_ONCE(st->state > st->target);
545 } else {
546 state = st->state;
547 st->state--;
548 st->should_run = (st->state > st->target);
549 WARN_ON_ONCE(st->state < st->target);
553 WARN_ON_ONCE(!cpuhp_is_ap_state(state));
555 if (st->rollback) {
556 struct cpuhp_step *step = cpuhp_get_step(state);
557 if (step->skip_onerr)
558 goto next;
561 if (cpuhp_is_atomic_state(state)) {
562 local_irq_disable();
563 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
564 local_irq_enable();
567 * STARTING/DYING must not fail!
569 WARN_ON_ONCE(st->result);
570 } else {
571 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
574 if (st->result) {
576 * If we fail on a rollback, we're up a creek without a
577 * paddle, no way forward, no way back. We lose, thanks for
578 * playing.
580 WARN_ON_ONCE(st->rollback);
581 st->should_run = false;
584 next:
585 cpuhp_lock_release(bringup);
587 if (!st->should_run)
588 complete_ap_thread(st, bringup);
591 /* Invoke a single callback on a remote cpu */
592 static int
593 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
594 struct hlist_node *node)
596 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
597 int ret;
599 if (!cpu_online(cpu))
600 return 0;
602 cpuhp_lock_acquire(false);
603 cpuhp_lock_release(false);
605 cpuhp_lock_acquire(true);
606 cpuhp_lock_release(true);
609 * If we are up and running, use the hotplug thread. For early calls
610 * we invoke the thread function directly.
612 if (!st->thread)
613 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
615 st->rollback = false;
616 st->last = NULL;
618 st->node = node;
619 st->bringup = bringup;
620 st->cb_state = state;
621 st->single = true;
623 __cpuhp_kick_ap(st);
626 * If we failed and did a partial, do a rollback.
628 if ((ret = st->result) && st->last) {
629 st->rollback = true;
630 st->bringup = !bringup;
632 __cpuhp_kick_ap(st);
635 return ret;
638 static int cpuhp_kick_ap_work(unsigned int cpu)
640 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
641 enum cpuhp_state prev_state = st->state;
642 int ret;
644 cpuhp_lock_acquire(false);
645 cpuhp_lock_release(false);
647 cpuhp_lock_acquire(true);
648 cpuhp_lock_release(true);
650 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
651 ret = cpuhp_kick_ap(st, st->target);
652 trace_cpuhp_exit(cpu, st->state, prev_state, ret);
654 return ret;
657 static struct smp_hotplug_thread cpuhp_threads = {
658 .store = &cpuhp_state.thread,
659 .create = &cpuhp_create,
660 .thread_should_run = cpuhp_should_run,
661 .thread_fn = cpuhp_thread_fun,
662 .thread_comm = "cpuhp/%u",
663 .selfparking = true,
666 void __init cpuhp_threads_init(void)
668 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
669 kthread_unpark(this_cpu_read(cpuhp_state.thread));
672 #ifdef CONFIG_HOTPLUG_CPU
674 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
675 * @cpu: a CPU id
677 * This function walks all processes, finds a valid mm struct for each one and
678 * then clears a corresponding bit in mm's cpumask. While this all sounds
679 * trivial, there are various non-obvious corner cases, which this function
680 * tries to solve in a safe manner.
682 * Also note that the function uses a somewhat relaxed locking scheme, so it may
683 * be called only for an already offlined CPU.
685 void clear_tasks_mm_cpumask(int cpu)
687 struct task_struct *p;
690 * This function is called after the cpu is taken down and marked
691 * offline, so it's not like new tasks will ever get this cpu set in
692 * their mm mask. -- Peter Zijlstra
693 * Thus, we may use rcu_read_lock() here, instead of grabbing
694 * full-fledged tasklist_lock.
696 WARN_ON(cpu_online(cpu));
697 rcu_read_lock();
698 for_each_process(p) {
699 struct task_struct *t;
702 * Main thread might exit, but other threads may still have
703 * a valid mm. Find one.
705 t = find_lock_task_mm(p);
706 if (!t)
707 continue;
708 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
709 task_unlock(t);
711 rcu_read_unlock();
714 /* Take this CPU down. */
715 static int take_cpu_down(void *_param)
717 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
718 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
719 int err, cpu = smp_processor_id();
720 int ret;
722 /* Ensure this CPU doesn't handle any more interrupts. */
723 err = __cpu_disable();
724 if (err < 0)
725 return err;
728 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
729 * do this step again.
731 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
732 st->state--;
733 /* Invoke the former CPU_DYING callbacks */
734 for (; st->state > target; st->state--) {
735 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
737 * DYING must not fail!
739 WARN_ON_ONCE(ret);
742 /* Give up timekeeping duties */
743 tick_handover_do_timer();
744 /* Park the stopper thread */
745 stop_machine_park(cpu);
746 return 0;
749 static int takedown_cpu(unsigned int cpu)
751 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
752 int err;
754 /* Park the smpboot threads */
755 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
756 smpboot_park_threads(cpu);
759 * Prevent irq alloc/free while the dying cpu reorganizes the
760 * interrupt affinities.
762 irq_lock_sparse();
765 * So now all preempt/rcu users must observe !cpu_active().
767 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
768 if (err) {
769 /* CPU refused to die */
770 irq_unlock_sparse();
771 /* Unpark the hotplug thread so we can rollback there */
772 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
773 return err;
775 BUG_ON(cpu_online(cpu));
778 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
779 * runnable tasks from the cpu, there's only the idle task left now
780 * that the migration thread is done doing the stop_machine thing.
782 * Wait for the stop thread to go away.
784 wait_for_ap_thread(st, false);
785 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
787 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
788 irq_unlock_sparse();
790 hotplug_cpu__broadcast_tick_pull(cpu);
791 /* This actually kills the CPU. */
792 __cpu_die(cpu);
794 tick_cleanup_dead_cpu(cpu);
795 rcutree_migrate_callbacks(cpu);
796 return 0;
799 static void cpuhp_complete_idle_dead(void *arg)
801 struct cpuhp_cpu_state *st = arg;
803 complete_ap_thread(st, false);
806 void cpuhp_report_idle_dead(void)
808 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
810 BUG_ON(st->state != CPUHP_AP_OFFLINE);
811 rcu_report_dead(smp_processor_id());
812 st->state = CPUHP_AP_IDLE_DEAD;
814 * We cannot call complete after rcu_report_dead() so we delegate it
815 * to an online cpu.
817 smp_call_function_single(cpumask_first(cpu_online_mask),
818 cpuhp_complete_idle_dead, st, 0);
821 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
823 for (st->state++; st->state < st->target; st->state++) {
824 struct cpuhp_step *step = cpuhp_get_step(st->state);
826 if (!step->skip_onerr)
827 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
831 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
832 enum cpuhp_state target)
834 enum cpuhp_state prev_state = st->state;
835 int ret = 0;
837 for (; st->state > target; st->state--) {
838 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
839 if (ret) {
840 st->target = prev_state;
841 undo_cpu_down(cpu, st);
842 break;
845 return ret;
848 /* Requires cpu_add_remove_lock to be held */
849 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
850 enum cpuhp_state target)
852 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
853 int prev_state, ret = 0;
855 if (num_online_cpus() == 1)
856 return -EBUSY;
858 if (!cpu_present(cpu))
859 return -EINVAL;
861 cpus_write_lock();
863 cpuhp_tasks_frozen = tasks_frozen;
865 prev_state = cpuhp_set_state(st, target);
867 * If the current CPU state is in the range of the AP hotplug thread,
868 * then we need to kick the thread.
870 if (st->state > CPUHP_TEARDOWN_CPU) {
871 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
872 ret = cpuhp_kick_ap_work(cpu);
874 * The AP side has done the error rollback already. Just
875 * return the error code.
877 if (ret)
878 goto out;
881 * We might have stopped still in the range of the AP hotplug
882 * thread. Nothing to do anymore.
884 if (st->state > CPUHP_TEARDOWN_CPU)
885 goto out;
887 st->target = target;
890 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
891 * to do the further cleanups.
893 ret = cpuhp_down_callbacks(cpu, st, target);
894 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
895 cpuhp_reset_state(st, prev_state);
896 __cpuhp_kick_ap(st);
899 out:
900 cpus_write_unlock();
902 * Do post unplug cleanup. This is still protected against
903 * concurrent CPU hotplug via cpu_add_remove_lock.
905 lockup_detector_cleanup();
906 return ret;
909 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
911 int err;
913 cpu_maps_update_begin();
915 if (cpu_hotplug_disabled) {
916 err = -EBUSY;
917 goto out;
920 err = _cpu_down(cpu, 0, target);
922 out:
923 cpu_maps_update_done();
924 return err;
927 int cpu_down(unsigned int cpu)
929 return do_cpu_down(cpu, CPUHP_OFFLINE);
931 EXPORT_SYMBOL(cpu_down);
933 #else
934 #define takedown_cpu NULL
935 #endif /*CONFIG_HOTPLUG_CPU*/
938 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
939 * @cpu: cpu that just started
941 * It must be called by the arch code on the new cpu, before the new cpu
942 * enables interrupts and before the "boot" cpu returns from __cpu_up().
944 void notify_cpu_starting(unsigned int cpu)
946 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
947 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
948 int ret;
950 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
951 while (st->state < target) {
952 st->state++;
953 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
955 * STARTING must not fail!
957 WARN_ON_ONCE(ret);
962 * Called from the idle task. Wake up the controlling task which brings the
963 * stopper and the hotplug thread of the upcoming CPU up and then delegates
964 * the rest of the online bringup to the hotplug thread.
966 void cpuhp_online_idle(enum cpuhp_state state)
968 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
970 /* Happens for the boot cpu */
971 if (state != CPUHP_AP_ONLINE_IDLE)
972 return;
974 st->state = CPUHP_AP_ONLINE_IDLE;
975 complete_ap_thread(st, true);
978 /* Requires cpu_add_remove_lock to be held */
979 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
981 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
982 struct task_struct *idle;
983 int ret = 0;
985 cpus_write_lock();
987 if (!cpu_present(cpu)) {
988 ret = -EINVAL;
989 goto out;
993 * The caller of do_cpu_up might have raced with another
994 * caller. Ignore it for now.
996 if (st->state >= target)
997 goto out;
999 if (st->state == CPUHP_OFFLINE) {
1000 /* Let it fail before we try to bring the cpu up */
1001 idle = idle_thread_get(cpu);
1002 if (IS_ERR(idle)) {
1003 ret = PTR_ERR(idle);
1004 goto out;
1008 cpuhp_tasks_frozen = tasks_frozen;
1010 cpuhp_set_state(st, target);
1012 * If the current CPU state is in the range of the AP hotplug thread,
1013 * then we need to kick the thread once more.
1015 if (st->state > CPUHP_BRINGUP_CPU) {
1016 ret = cpuhp_kick_ap_work(cpu);
1018 * The AP side has done the error rollback already. Just
1019 * return the error code.
1021 if (ret)
1022 goto out;
1026 * Try to reach the target state. We max out on the BP at
1027 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1028 * responsible for bringing it up to the target state.
1030 target = min((int)target, CPUHP_BRINGUP_CPU);
1031 ret = cpuhp_up_callbacks(cpu, st, target);
1032 out:
1033 cpus_write_unlock();
1034 return ret;
1037 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1039 int err = 0;
1041 if (!cpu_possible(cpu)) {
1042 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1043 cpu);
1044 #if defined(CONFIG_IA64)
1045 pr_err("please check additional_cpus= boot parameter\n");
1046 #endif
1047 return -EINVAL;
1050 err = try_online_node(cpu_to_node(cpu));
1051 if (err)
1052 return err;
1054 cpu_maps_update_begin();
1056 if (cpu_hotplug_disabled) {
1057 err = -EBUSY;
1058 goto out;
1061 err = _cpu_up(cpu, 0, target);
1062 out:
1063 cpu_maps_update_done();
1064 return err;
1067 int cpu_up(unsigned int cpu)
1069 return do_cpu_up(cpu, CPUHP_ONLINE);
1071 EXPORT_SYMBOL_GPL(cpu_up);
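/*
 * Example (illustrative sketch, not from this file): built-in kernel code
 * such as a platform driver can take a CPU offline and bring it back through
 * the exported wrappers; both return 0 on success or a negative errno.
 *
 *	err = cpu_down(cpu);
 *	if (!err)
 *		err = cpu_up(cpu);
 */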
1073 #ifdef CONFIG_PM_SLEEP_SMP
1074 static cpumask_var_t frozen_cpus;
1076 int freeze_secondary_cpus(int primary)
1078 int cpu, error = 0;
1080 cpu_maps_update_begin();
1081 if (!cpu_online(primary))
1082 primary = cpumask_first(cpu_online_mask);
1084 * We take down all of the non-boot CPUs in one shot to avoid races
1085 * with userspace trying to use CPU hotplug at the same time
1087 cpumask_clear(frozen_cpus);
1089 pr_info("Disabling non-boot CPUs ...\n");
1090 for_each_online_cpu(cpu) {
1091 if (cpu == primary)
1092 continue;
1093 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1094 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1095 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1096 if (!error)
1097 cpumask_set_cpu(cpu, frozen_cpus);
1098 else {
1099 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1100 break;
1104 if (!error)
1105 BUG_ON(num_online_cpus() > 1);
1106 else
1107 pr_err("Non-boot CPUs are not disabled\n");
1110 * Make sure the CPUs won't be enabled by someone else. We need to do
1111 * this even in case of failure as all disable_nonboot_cpus() users are
1112 * supposed to do enable_nonboot_cpus() on the failure path.
1114 cpu_hotplug_disabled++;
1116 cpu_maps_update_done();
1117 return error;
1120 void __weak arch_enable_nonboot_cpus_begin(void)
1124 void __weak arch_enable_nonboot_cpus_end(void)
1128 void enable_nonboot_cpus(void)
1130 int cpu, error;
1132 /* Allow everyone to use the CPU hotplug again */
1133 cpu_maps_update_begin();
1134 __cpu_hotplug_enable();
1135 if (cpumask_empty(frozen_cpus))
1136 goto out;
1138 pr_info("Enabling non-boot CPUs ...\n");
1140 arch_enable_nonboot_cpus_begin();
1142 for_each_cpu(cpu, frozen_cpus) {
1143 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1144 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1145 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1146 if (!error) {
1147 pr_info("CPU%d is up\n", cpu);
1148 continue;
1150 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1153 arch_enable_nonboot_cpus_end();
1155 cpumask_clear(frozen_cpus);
1156 out:
1157 cpu_maps_update_done();
1160 static int __init alloc_frozen_cpus(void)
1162 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1163 return -ENOMEM;
1164 return 0;
1166 core_initcall(alloc_frozen_cpus);
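/*
 * Example (rough sketch of how the suspend path pairs these helpers; see
 * kernel/power for the real call sites):
 *
 *	error = freeze_secondary_cpus(0);
 *	if (!error) {
 *		... suspend the system with only the boot CPU online ...
 *		enable_nonboot_cpus();
 *	}
 */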
1169 * When callbacks for CPU hotplug notifications are being executed, we must
1170 * ensure that the state of the system with respect to the tasks being frozen
1171 * or not, as reported by the notification, remains unchanged *throughout the
1172 * duration* of the execution of the callbacks.
1173 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1175 * This synchronization is implemented by mutually excluding regular CPU
1176 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1177 * Hibernate notifications.
1179 static int
1180 cpu_hotplug_pm_callback(struct notifier_block *nb,
1181 unsigned long action, void *ptr)
1183 switch (action) {
1185 case PM_SUSPEND_PREPARE:
1186 case PM_HIBERNATION_PREPARE:
1187 cpu_hotplug_disable();
1188 break;
1190 case PM_POST_SUSPEND:
1191 case PM_POST_HIBERNATION:
1192 cpu_hotplug_enable();
1193 break;
1195 default:
1196 return NOTIFY_DONE;
1199 return NOTIFY_OK;
1203 static int __init cpu_hotplug_pm_sync_init(void)
1206 * cpu_hotplug_pm_callback has higher priority than x86
1207 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1208 * to disable cpu hotplug to avoid cpu hotplug race.
1210 pm_notifier(cpu_hotplug_pm_callback, 0);
1211 return 0;
1213 core_initcall(cpu_hotplug_pm_sync_init);
1215 #endif /* CONFIG_PM_SLEEP_SMP */
1217 int __boot_cpu_id;
1219 #endif /* CONFIG_SMP */
1221 /* Boot processor state steps */
1222 static struct cpuhp_step cpuhp_bp_states[] = {
1223 [CPUHP_OFFLINE] = {
1224 .name = "offline",
1225 .startup.single = NULL,
1226 .teardown.single = NULL,
1228 #ifdef CONFIG_SMP
1229 [CPUHP_CREATE_THREADS]= {
1230 .name = "threads:prepare",
1231 .startup.single = smpboot_create_threads,
1232 .teardown.single = NULL,
1233 .cant_stop = true,
1235 [CPUHP_PERF_PREPARE] = {
1236 .name = "perf:prepare",
1237 .startup.single = perf_event_init_cpu,
1238 .teardown.single = perf_event_exit_cpu,
1240 [CPUHP_WORKQUEUE_PREP] = {
1241 .name = "workqueue:prepare",
1242 .startup.single = workqueue_prepare_cpu,
1243 .teardown.single = NULL,
1245 [CPUHP_HRTIMERS_PREPARE] = {
1246 .name = "hrtimers:prepare",
1247 .startup.single = hrtimers_prepare_cpu,
1248 .teardown.single = hrtimers_dead_cpu,
1250 [CPUHP_SMPCFD_PREPARE] = {
1251 .name = "smpcfd:prepare",
1252 .startup.single = smpcfd_prepare_cpu,
1253 .teardown.single = smpcfd_dead_cpu,
1255 [CPUHP_RELAY_PREPARE] = {
1256 .name = "relay:prepare",
1257 .startup.single = relay_prepare_cpu,
1258 .teardown.single = NULL,
1260 [CPUHP_SLAB_PREPARE] = {
1261 .name = "slab:prepare",
1262 .startup.single = slab_prepare_cpu,
1263 .teardown.single = slab_dead_cpu,
1265 [CPUHP_RCUTREE_PREP] = {
1266 .name = "RCU/tree:prepare",
1267 .startup.single = rcutree_prepare_cpu,
1268 .teardown.single = rcutree_dead_cpu,
1271 * On the tear-down path, timers_dead_cpu() must be invoked
1272 * before blk_mq_queue_reinit_notify() from notify_dead(),
1273 * otherwise an RCU stall occurs.
1275 [CPUHP_TIMERS_DEAD] = {
1276 .name = "timers:dead",
1277 .startup.single = NULL,
1278 .teardown.single = timers_dead_cpu,
1280 /* Kicks the plugged cpu into life */
1281 [CPUHP_BRINGUP_CPU] = {
1282 .name = "cpu:bringup",
1283 .startup.single = bringup_cpu,
1284 .teardown.single = NULL,
1285 .cant_stop = true,
1287 [CPUHP_AP_SMPCFD_DYING] = {
1288 .name = "smpcfd:dying",
1289 .startup.single = NULL,
1290 .teardown.single = smpcfd_dying_cpu,
1293 * Handled on control processor until the plugged processor manages
1294 * this itself.
1296 [CPUHP_TEARDOWN_CPU] = {
1297 .name = "cpu:teardown",
1298 .startup.single = NULL,
1299 .teardown.single = takedown_cpu,
1300 .cant_stop = true,
1302 #else
1303 [CPUHP_BRINGUP_CPU] = { },
1304 #endif
1307 /* Application processor state steps */
1308 static struct cpuhp_step cpuhp_ap_states[] = {
1309 #ifdef CONFIG_SMP
1310 /* Final state before CPU kills itself */
1311 [CPUHP_AP_IDLE_DEAD] = {
1312 .name = "idle:dead",
1315 * Last state before CPU enters the idle loop to die. Transient state
1316 * for synchronization.
1318 [CPUHP_AP_OFFLINE] = {
1319 .name = "ap:offline",
1320 .cant_stop = true,
1322 /* First state is scheduler control. Interrupts are disabled */
1323 [CPUHP_AP_SCHED_STARTING] = {
1324 .name = "sched:starting",
1325 .startup.single = sched_cpu_starting,
1326 .teardown.single = sched_cpu_dying,
1328 [CPUHP_AP_RCUTREE_DYING] = {
1329 .name = "RCU/tree:dying",
1330 .startup.single = NULL,
1331 .teardown.single = rcutree_dying_cpu,
1333 /* Entry state on starting. Interrupts enabled from here on. Transient
1334 * state for synchronization */
1335 [CPUHP_AP_ONLINE] = {
1336 .name = "ap:online",
1338 /* Handle smpboot threads park/unpark */
1339 [CPUHP_AP_SMPBOOT_THREADS] = {
1340 .name = "smpboot/threads:online",
1341 .startup.single = smpboot_unpark_threads,
1342 .teardown.single = NULL,
1344 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1345 .name = "irq/affinity:online",
1346 .startup.single = irq_affinity_online_cpu,
1347 .teardown.single = NULL,
1349 [CPUHP_AP_PERF_ONLINE] = {
1350 .name = "perf:online",
1351 .startup.single = perf_event_init_cpu,
1352 .teardown.single = perf_event_exit_cpu,
1354 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1355 .name = "workqueue:online",
1356 .startup.single = workqueue_online_cpu,
1357 .teardown.single = workqueue_offline_cpu,
1359 [CPUHP_AP_RCUTREE_ONLINE] = {
1360 .name = "RCU/tree:online",
1361 .startup.single = rcutree_online_cpu,
1362 .teardown.single = rcutree_offline_cpu,
1364 #endif
1366 * The dynamically registered state space is here
1369 #ifdef CONFIG_SMP
1370 /* Last state is scheduler control setting the cpu active */
1371 [CPUHP_AP_ACTIVE] = {
1372 .name = "sched:active",
1373 .startup.single = sched_cpu_activate,
1374 .teardown.single = sched_cpu_deactivate,
1376 #endif
1378 /* CPU is fully up and running. */
1379 [CPUHP_ONLINE] = {
1380 .name = "online",
1381 .startup.single = NULL,
1382 .teardown.single = NULL,
1386 /* Sanity check for callbacks */
1387 static int cpuhp_cb_check(enum cpuhp_state state)
1389 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1390 return -EINVAL;
1391 return 0;
1395 * Returns a free slot for dynamic state assignment in the Online range. The
1396 * states are protected by the cpuhp_state_mutex and an empty slot is
1397 * identified by having no name assigned.
1399 static int cpuhp_reserve_state(enum cpuhp_state state)
1401 enum cpuhp_state i, end;
1402 struct cpuhp_step *step;
1404 switch (state) {
1405 case CPUHP_AP_ONLINE_DYN:
1406 step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
1407 end = CPUHP_AP_ONLINE_DYN_END;
1408 break;
1409 case CPUHP_BP_PREPARE_DYN:
1410 step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
1411 end = CPUHP_BP_PREPARE_DYN_END;
1412 break;
1413 default:
1414 return -EINVAL;
1417 for (i = state; i <= end; i++, step++) {
1418 if (!step->name)
1419 return i;
1421 WARN(1, "No more dynamic states available for CPU hotplug\n");
1422 return -ENOSPC;
1425 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1426 int (*startup)(unsigned int cpu),
1427 int (*teardown)(unsigned int cpu),
1428 bool multi_instance)
1430 /* (Un)Install the callbacks for further cpu hotplug operations */
1431 struct cpuhp_step *sp;
1432 int ret = 0;
1435 * If name is NULL, then the state gets removed.
1437 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1438 * the first allocation from these dynamic ranges, so the removal
1439 * would trigger a new allocation and clear the wrong (already
1440 * empty) state, leaving the callbacks of the to be cleared state
1441 * dangling, which causes wreckage on the next hotplug operation.
1443 if (name && (state == CPUHP_AP_ONLINE_DYN ||
1444 state == CPUHP_BP_PREPARE_DYN)) {
1445 ret = cpuhp_reserve_state(state);
1446 if (ret < 0)
1447 return ret;
1448 state = ret;
1450 sp = cpuhp_get_step(state);
1451 if (name && sp->name)
1452 return -EBUSY;
1454 sp->startup.single = startup;
1455 sp->teardown.single = teardown;
1456 sp->name = name;
1457 sp->multi_instance = multi_instance;
1458 INIT_HLIST_HEAD(&sp->list);
1459 return ret;
1462 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1464 return cpuhp_get_step(state)->teardown.single;
1468 * Call the startup/teardown function for a step either on the AP or
1469 * on the current CPU.
1471 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1472 struct hlist_node *node)
1474 struct cpuhp_step *sp = cpuhp_get_step(state);
1475 int ret;
1478 * If there's nothing to do, we're done.
1479 * Relies on the union for multi_instance.
1481 if ((bringup && !sp->startup.single) ||
1482 (!bringup && !sp->teardown.single))
1483 return 0;
1485 * The non-AP-bound callbacks can fail on bringup. On teardown,
1486 * e.g. module removal, we crash for now.
1488 #ifdef CONFIG_SMP
1489 if (cpuhp_is_ap_state(state))
1490 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1491 else
1492 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1493 #else
1494 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1495 #endif
1496 BUG_ON(ret && !bringup);
1497 return ret;
1501 * Called from __cpuhp_setup_state on a recoverable failure.
1503 * Note: The teardown callbacks for rollback are not allowed to fail!
1505 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1506 struct hlist_node *node)
1508 int cpu;
1510 /* Roll back the already executed steps on the other cpus */
1511 for_each_present_cpu(cpu) {
1512 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1513 int cpustate = st->state;
1515 if (cpu >= failedcpu)
1516 break;
1518 /* Did we invoke the startup call on that cpu ? */
1519 if (cpustate >= state)
1520 cpuhp_issue_call(cpu, state, false, node);
1524 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1525 struct hlist_node *node,
1526 bool invoke)
1528 struct cpuhp_step *sp;
1529 int cpu;
1530 int ret;
1532 lockdep_assert_cpus_held();
1534 sp = cpuhp_get_step(state);
1535 if (sp->multi_instance == false)
1536 return -EINVAL;
1538 mutex_lock(&cpuhp_state_mutex);
1540 if (!invoke || !sp->startup.multi)
1541 goto add_node;
1544 * Try to call the startup callback for each present cpu
1545 * depending on the hotplug state of the cpu.
1547 for_each_present_cpu(cpu) {
1548 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1549 int cpustate = st->state;
1551 if (cpustate < state)
1552 continue;
1554 ret = cpuhp_issue_call(cpu, state, true, node);
1555 if (ret) {
1556 if (sp->teardown.multi)
1557 cpuhp_rollback_install(cpu, state, node);
1558 goto unlock;
1561 add_node:
1562 ret = 0;
1563 hlist_add_head(node, &sp->list);
1564 unlock:
1565 mutex_unlock(&cpuhp_state_mutex);
1566 return ret;
1569 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1570 bool invoke)
1572 int ret;
1574 cpus_read_lock();
1575 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1576 cpus_read_unlock();
1577 return ret;
1579 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
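/*
 * Example (illustrative sketch, not from this file): a driver using a
 * multi-instance state embeds the hlist_node in its per-device structure,
 * registers the state once with cpuhp_setup_state_multi() and then adds one
 * instance per device. "struct foo_dev" and the foo_* names are made up.
 *
 *	struct foo_dev {
 *		struct hlist_node node;
 *	};
 *
 *	static int foo_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct foo_dev *foo = hlist_entry(node, struct foo_dev, node);
 *
 *		return foo_setup_on_cpu(foo, cpu);
 *	}
 *
 *	foo_hp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					       "foo:online", foo_online, NULL);
 *	ret = cpuhp_state_add_instance(foo_hp_state, &foo->node);
 */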
1582 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
1583 * @state: The state to setup
1584 * @invoke: If true, the startup function is invoked for cpus where
1585 * cpu state >= @state
1586 * @startup: startup callback function
1587 * @teardown: teardown callback function
1588 * @multi_instance: State is set up for multiple instances which get
1589 * added afterwards.
1591 * The caller needs to hold cpus read locked while calling this function.
1592 * Returns:
1593 * On success:
1594 * Positive state number if @state is CPUHP_AP_ONLINE_DYN
1595 * 0 for all other states
1596 * On failure: proper (negative) error code
1598 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1599 const char *name, bool invoke,
1600 int (*startup)(unsigned int cpu),
1601 int (*teardown)(unsigned int cpu),
1602 bool multi_instance)
1604 int cpu, ret = 0;
1605 bool dynstate;
1607 lockdep_assert_cpus_held();
1609 if (cpuhp_cb_check(state) || !name)
1610 return -EINVAL;
1612 mutex_lock(&cpuhp_state_mutex);
1614 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1615 multi_instance);
1617 dynstate = state == CPUHP_AP_ONLINE_DYN;
1618 if (ret > 0 && dynstate) {
1619 state = ret;
1620 ret = 0;
1623 if (ret || !invoke || !startup)
1624 goto out;
1627 * Try to call the startup callback for each present cpu
1628 * depending on the hotplug state of the cpu.
1630 for_each_present_cpu(cpu) {
1631 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1632 int cpustate = st->state;
1634 if (cpustate < state)
1635 continue;
1637 ret = cpuhp_issue_call(cpu, state, true, NULL);
1638 if (ret) {
1639 if (teardown)
1640 cpuhp_rollback_install(cpu, state, NULL);
1641 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1642 goto out;
1645 out:
1646 mutex_unlock(&cpuhp_state_mutex);
1648 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1649 * dynamically allocated state in case of success.
1651 if (!ret && dynstate)
1652 return state;
1653 return ret;
1655 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1657 int __cpuhp_setup_state(enum cpuhp_state state,
1658 const char *name, bool invoke,
1659 int (*startup)(unsigned int cpu),
1660 int (*teardown)(unsigned int cpu),
1661 bool multi_instance)
1663 int ret;
1665 cpus_read_lock();
1666 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1667 teardown, multi_instance);
1668 cpus_read_unlock();
1669 return ret;
1671 EXPORT_SYMBOL(__cpuhp_setup_state);
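/*
 * Example (illustrative sketch, not from this file): the common way to
 * consume this interface is the cpuhp_setup_state() wrapper from
 * <linux/cpuhotplug.h> with a dynamic slot; the callback names are
 * placeholders. For CPUHP_AP_ONLINE_DYN the returned value is the allocated
 * state number, which must be kept for later removal.
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
 *				foo_cpu_online, foo_cpu_dead);
 *	if (ret < 0)
 *		return ret;
 *	foo_hp_state = ret;
 */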
1673 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1674 struct hlist_node *node, bool invoke)
1676 struct cpuhp_step *sp = cpuhp_get_step(state);
1677 int cpu;
1679 BUG_ON(cpuhp_cb_check(state));
1681 if (!sp->multi_instance)
1682 return -EINVAL;
1684 cpus_read_lock();
1685 mutex_lock(&cpuhp_state_mutex);
1687 if (!invoke || !cpuhp_get_teardown_cb(state))
1688 goto remove;
1690 * Call the teardown callback for each present cpu depending
1691 * on the hotplug state of the cpu. This function is not
1692 * allowed to fail currently!
1694 for_each_present_cpu(cpu) {
1695 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1696 int cpustate = st->state;
1698 if (cpustate >= state)
1699 cpuhp_issue_call(cpu, state, false, node);
1702 remove:
1703 hlist_del(node);
1704 mutex_unlock(&cpuhp_state_mutex);
1705 cpus_read_unlock();
1707 return 0;
1709 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1712 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
1713 * @state: The state to remove
1714 * @invoke: If true, the teardown function is invoked for cpus where
1715 * cpu state >= @state
1717 * The caller needs to hold cpus read locked while calling this function.
1718 * The teardown callback is currently not allowed to fail. Think
1719 * about module removal!
1721 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
1723 struct cpuhp_step *sp = cpuhp_get_step(state);
1724 int cpu;
1726 BUG_ON(cpuhp_cb_check(state));
1728 lockdep_assert_cpus_held();
1730 mutex_lock(&cpuhp_state_mutex);
1731 if (sp->multi_instance) {
1732 WARN(!hlist_empty(&sp->list),
1733 "Error: Removing state %d which has instances left.\n",
1734 state);
1735 goto remove;
1738 if (!invoke || !cpuhp_get_teardown_cb(state))
1739 goto remove;
1742 * Call the teardown callback for each present cpu depending
1743 * on the hotplug state of the cpu. This function is not
1744 * allowed to fail currently!
1746 for_each_present_cpu(cpu) {
1747 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1748 int cpustate = st->state;
1750 if (cpustate >= state)
1751 cpuhp_issue_call(cpu, state, false, NULL);
1753 remove:
1754 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1755 mutex_unlock(&cpuhp_state_mutex);
1757 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
1759 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1761 cpus_read_lock();
1762 __cpuhp_remove_state_cpuslocked(state, invoke);
1763 cpus_read_unlock();
1765 EXPORT_SYMBOL(__cpuhp_remove_state);
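/*
 * Example (illustrative sketch): the counterpart to the setup example above.
 * On module exit the dynamically allocated state is removed again, which
 * invokes the teardown callback on all online CPUs. foo_hp_state is the
 * value returned by cpuhp_setup_state() earlier.
 *
 *	cpuhp_remove_state(foo_hp_state);
 */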
1767 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1768 static ssize_t show_cpuhp_state(struct device *dev,
1769 struct device_attribute *attr, char *buf)
1771 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1773 return sprintf(buf, "%d\n", st->state);
1775 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1777 static ssize_t write_cpuhp_target(struct device *dev,
1778 struct device_attribute *attr,
1779 const char *buf, size_t count)
1781 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1782 struct cpuhp_step *sp;
1783 int target, ret;
1785 ret = kstrtoint(buf, 10, &target);
1786 if (ret)
1787 return ret;
1789 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1790 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1791 return -EINVAL;
1792 #else
1793 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1794 return -EINVAL;
1795 #endif
1797 ret = lock_device_hotplug_sysfs();
1798 if (ret)
1799 return ret;
1801 mutex_lock(&cpuhp_state_mutex);
1802 sp = cpuhp_get_step(target);
1803 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1804 mutex_unlock(&cpuhp_state_mutex);
1805 if (ret)
1806 goto out;
1808 if (st->state < target)
1809 ret = do_cpu_up(dev->id, target);
1810 else
1811 ret = do_cpu_down(dev->id, target);
1812 out:
1813 unlock_device_hotplug();
1814 return ret ? ret : count;
1817 static ssize_t show_cpuhp_target(struct device *dev,
1818 struct device_attribute *attr, char *buf)
1820 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1822 return sprintf(buf, "%d\n", st->target);
1824 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1827 static ssize_t write_cpuhp_fail(struct device *dev,
1828 struct device_attribute *attr,
1829 const char *buf, size_t count)
1831 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1832 struct cpuhp_step *sp;
1833 int fail, ret;
1835 ret = kstrtoint(buf, 10, &fail);
1836 if (ret)
1837 return ret;
1840 * Cannot fail STARTING/DYING callbacks.
1842 if (cpuhp_is_atomic_state(fail))
1843 return -EINVAL;
1846 * Cannot fail anything that doesn't have callbacks.
1848 mutex_lock(&cpuhp_state_mutex);
1849 sp = cpuhp_get_step(fail);
1850 if (!sp->startup.single && !sp->teardown.single)
1851 ret = -EINVAL;
1852 mutex_unlock(&cpuhp_state_mutex);
1853 if (ret)
1854 return ret;
1856 st->fail = fail;
1858 return count;
1861 static ssize_t show_cpuhp_fail(struct device *dev,
1862 struct device_attribute *attr, char *buf)
1864 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1866 return sprintf(buf, "%d\n", st->fail);
1869 static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
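/*
 * Example (illustrative usage note): these per-cpu attributes live under
 * /sys/devices/system/cpu/cpuN/hotplug/. With CONFIG_CPU_HOTPLUG_STATE_CONTROL
 * an intermediate state number can be written to "target" to stop the state
 * machine part way, and writing a state number to "fail" makes that state's
 * callback fail on the next transition through it, e.g. (shell, state number
 * arbitrary):
 *
 *	echo 140 > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	echo 140 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Valid state numbers can be read from the global "states" attribute defined
 * below.
 */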
1871 static struct attribute *cpuhp_cpu_attrs[] = {
1872 &dev_attr_state.attr,
1873 &dev_attr_target.attr,
1874 &dev_attr_fail.attr,
1875 NULL
1878 static const struct attribute_group cpuhp_cpu_attr_group = {
1879 .attrs = cpuhp_cpu_attrs,
1880 .name = "hotplug",
1881 NULL
1884 static ssize_t show_cpuhp_states(struct device *dev,
1885 struct device_attribute *attr, char *buf)
1887 ssize_t cur, res = 0;
1888 int i;
1890 mutex_lock(&cpuhp_state_mutex);
1891 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
1892 struct cpuhp_step *sp = cpuhp_get_step(i);
1894 if (sp->name) {
1895 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1896 buf += cur;
1897 res += cur;
1900 mutex_unlock(&cpuhp_state_mutex);
1901 return res;
1903 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1905 static struct attribute *cpuhp_cpu_root_attrs[] = {
1906 &dev_attr_states.attr,
1907 NULL
1910 static const struct attribute_group cpuhp_cpu_root_attr_group = {
1911 .attrs = cpuhp_cpu_root_attrs,
1912 .name = "hotplug",
1913 NULL
1916 static int __init cpuhp_sysfs_init(void)
1918 int cpu, ret;
1920 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
1921 &cpuhp_cpu_root_attr_group);
1922 if (ret)
1923 return ret;
1925 for_each_possible_cpu(cpu) {
1926 struct device *dev = get_cpu_device(cpu);
1928 if (!dev)
1929 continue;
1930 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
1931 if (ret)
1932 return ret;
1934 return 0;
1936 device_initcall(cpuhp_sysfs_init);
1937 #endif
1940 * cpu_bit_bitmap[] is a special, "compressed" data structure that
1941 * represents the NR_CPUS-bit value 1<<nr for every valid bit number nr.
1943 * It is used by cpumask_of() to get a constant address to a CPU
1944 * mask value that has a single bit set only.
1947 /* cpu_bit_bitmap[0] is empty - so we can back into it */
1948 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
1949 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
1950 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
1951 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
1953 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
1955 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
1956 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
1957 #if BITS_PER_LONG > 32
1958 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
1959 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
1960 #endif
1962 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
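/*
 * Example (illustrative sketch): cpumask_of() resolves into this table, so a
 * constant single-CPU mask is available without building one at run time.
 * do_poke() is a made-up callback.
 *
 *	smp_call_function_any(cpumask_of(cpu), do_poke, NULL, 1);
 */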
1964 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
1965 EXPORT_SYMBOL(cpu_all_bits);
1967 #ifdef CONFIG_INIT_ALL_POSSIBLE
1968 struct cpumask __cpu_possible_mask __read_mostly
1969 = {CPU_BITS_ALL};
1970 #else
1971 struct cpumask __cpu_possible_mask __read_mostly;
1972 #endif
1973 EXPORT_SYMBOL(__cpu_possible_mask);
1975 struct cpumask __cpu_online_mask __read_mostly;
1976 EXPORT_SYMBOL(__cpu_online_mask);
1978 struct cpumask __cpu_present_mask __read_mostly;
1979 EXPORT_SYMBOL(__cpu_present_mask);
1981 struct cpumask __cpu_active_mask __read_mostly;
1982 EXPORT_SYMBOL(__cpu_active_mask);
1984 void init_cpu_present(const struct cpumask *src)
1986 cpumask_copy(&__cpu_present_mask, src);
1989 void init_cpu_possible(const struct cpumask *src)
1991 cpumask_copy(&__cpu_possible_mask, src);
1994 void init_cpu_online(const struct cpumask *src)
1996 cpumask_copy(&__cpu_online_mask, src);
2000 * Activate the first processor.
2002 void __init boot_cpu_init(void)
2004 int cpu = smp_processor_id();
2006 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2007 set_cpu_online(cpu, true);
2008 set_cpu_active(cpu, true);
2009 set_cpu_present(cpu, true);
2010 set_cpu_possible(cpu, true);
2012 #ifdef CONFIG_SMP
2013 __boot_cpu_id = cpu;
2014 #endif
2018 * Must be called _AFTER_ setting up the per_cpu areas
2020 void __init boot_cpu_state_init(void)
2022 per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;