/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include "smpboot.h"
enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
					     cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		/* Fall-through to the CPU_DEAD[_FROZEN] case. */

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_percpu(cfd->csd);
		break;

	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * The IPIs for the smp-call-function callbacks queued by other
		 * CPUs might arrive late, either due to hardware latencies or
		 * because this CPU disabled interrupts (inside stop-machine)
		 * before the IPIs were sent. So flush out any pending callbacks
		 * explicitly (without waiting for the IPIs to arrive), to
		 * ensure that the outgoing CPU doesn't go offline with work
		 * still pending.
		 */
		flush_smp_call_function_queue(false);
		break;
#endif
	}

	return NOTIFY_OK;
}
static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}
/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
	while (smp_load_acquire(&csd->flags) & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}
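
/*
 * Illustrative lifecycle sketch (editorial addition, not part of the
 * original file): the CSD_FLAG_LOCK bit acts as a tiny hand-rolled lock
 * around one in-flight call per csd. A sender does, in order:
 *
 *	csd_lock(csd);			// spin until the prior call is done,
 *					// then claim the csd
 *	csd->func = func;		// safe: smp_wmb() in csd_lock() keeps
 *	csd->info = info;		// these after the flag store
 *	llist_add(&csd->llist, ...);	// publish to the target's queue
 *
 * The target's IPI handler eventually runs func(info) and calls
 * csd_unlock(csd), whose smp_store_release() pairs with the
 * smp_load_acquire() in csd_lock_wait() on the next sender.
 */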
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;
	/*
	 * The list addition should be visible to the IPI handler, which
	 * locks the list to pull the entry off it, before the IPI is
	 * sent; that ordering follows from the normal cache coherency
	 * rules implied by spinlocks.
	 *
	 * If IPIs can be observed out of order with respect to the cache
	 * coherency protocol on an architecture, sufficient
	 * synchronisation should be added to the arch code to make it
	 * appear to obey cache coherency WRT locking and barrier
	 * primitives. Generic code isn't really equipped to do the right
	 * thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}
/*
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}
/*
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS, };
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
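
/*
 * Illustrative usage sketch (editorial addition; read_local_freq() and
 * cpu_freq_khz() are hypothetical): run a short callback on CPU 2 and
 * wait for its result before proceeding. The callback runs in hard
 * interrupt context on the target, so it must not sleep or take
 * sleeping locks.
 *
 *	static void read_local_freq(void *info)
 *	{
 *		*(unsigned int *)info = cpu_freq_khz();
 *	}
 *
 *	unsigned int khz;
 *	int err = smp_call_function_single(2, read_local_freq, &khz, 1);
 *	if (err)
 *		pr_err("CPU 2 not online: %d\n", err);
 */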
/*
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
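
/*
 * Illustrative usage sketch (editorial addition; struct foo_dev and
 * foo_poke() are hypothetical): the csd is embedded in a long-lived
 * object and ->func/->info are set up by the caller, so the call can
 * be fired from irq-disabled context. The caller must not reuse the
 * csd until the previous call has completed (CSD_FLAG_LOCK released).
 *
 *	struct foo_dev {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	dev->csd.func = foo_poke;
 *	dev->csd.info = dev;
 *	smp_call_function_single_async(target_cpu, &dev->csd);
 */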
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
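
/*
 * Illustrative usage sketch (editorial addition; pkg_mask and
 * sample_counters() are hypothetical): when any CPU in a set can
 * service the request, this helper picks the cheapest one, preferring
 * the current CPU, then a node-local CPU, before IPI'ing further away.
 *
 *	ret = smp_call_function_any(pkg_mask, sample_counters, &data, 1);
 */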
/*
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
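
/*
 * Illustrative usage sketch (editorial addition; flush_local_tlb() is a
 * hypothetical callback): preemption must be disabled around the call,
 * and the local CPU is deliberately skipped, so callers that also need
 * the local work run it themselves; on_each_cpu_mask() below wraps
 * exactly this pattern.
 *
 *	preempt_disable();
 *	smp_call_function_many(mask, flush_local_tlb, NULL, true);
 *	if (cpumask_test_cpu(smp_processor_id(), mask))
 *		flush_local_tlb(NULL);
 *	preempt_enable();
 */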
/*
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
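
/*
 * Illustrative usage sketch (editorial addition; sync_core_cb() is
 * hypothetical): IPI every other online CPU and wait until all of them
 * have run the callback, e.g. after updating shared state that remote
 * CPUs must observe before we continue.
 *
 *	smp_call_function(sync_core_cb, NULL, 1);
 */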
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
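
/*
 * Illustrative boot-line examples (editorial addition) showing how the
 * three early parameters above differ:
 *
 *	nosmp		- boot UP: setup_max_cpus = 0
 *	nr_cpus=4	- hard-cap nr_cpu_ids at 4 (possible CPU ids)
 *	maxcpus=2	- bring up at most 2 CPUs at boot; the remaining
 *			  present CPUs can typically still be onlined
 *			  later via CPU hotplug
 */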
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
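
/*
 * Worked example (editorial addition): with cpu_possible_mask = 0b1011
 * (CPUs 0, 1 and 3 possible), find_last_bit() returns 3, so nr_cpu_ids
 * becomes 4. It is the highest possible CPU number plus one, not the
 * count of possible CPUs.
 */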
void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
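
/*
 * Illustrative usage sketch (editorial addition; drain_local_stats() is
 * hypothetical): unlike smp_call_function(), this also runs the callback
 * on the calling CPU (with interrupts disabled locally), so one call
 * covers every online CPU.
 *
 *	on_each_cpu(drain_local_stats, NULL, 1);
 */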
/*
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
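
/*
 * Illustrative usage sketch (editorial addition; mm_cpumask() is a real
 * helper, flush_tlb_cb() is hypothetical): run a callback only on the
 * CPUs that have a given mm loaded, including the current CPU if it is
 * in the mask.
 *
 *	on_each_cpu_mask(mm_cpumask(mm), flush_tlb_cb, mm, true);
 */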
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
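
/*
 * Illustrative usage sketch (editorial addition; cpu_has_dirty_cache(),
 * the per-cpu variable nr_dirty and writeback_cb() are all
 * hypothetical): IPI only the CPUs that actually need the work, as
 * decided per-CPU by the predicate.
 *
 *	static bool cpu_has_dirty_cache(int cpu, void *info)
 *	{
 *		return per_cpu(nr_dirty, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(cpu_has_dirty_cache, writeback_cb, NULL,
 *			 true, GFP_KERNEL);
 */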
static void do_nothing(void *unused)
{
}

/*
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
/*
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to break all cpus out of idle, including cpus that are idle
 * polling. Non-idle cpus are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);