kernel/smp.c
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>
static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
enum {
	CSD_FLAG_WAIT	= 0x01,
	CSD_FLAG_ALLOC	= 0x02,
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	cpumask_t cpumask;
	struct rcu_head rcu_head;
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};
void __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
}
static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		/*
		 * We need to see the flags store in the IPI handler
		 */
		smp_mb();
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}
/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}
static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}
/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpu_isset(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpu_clear(cpu, data->cpumask);
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * serialize stores to data with the flag clear
			 * and wakeup
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		} else
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}
/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Need to see other stores to list head for checking whether
	 * list is empty without holding q->lock
	 */
	smp_mb();
	while (!list_empty(&q->list)) {
		unsigned int data_flags;

		spin_lock(&q->lock);
		list_replace_init(&q->list, &list);
		spin_unlock(&q->lock);

		while (!list_empty(&list)) {
			struct call_single_data *data;

			data = list_entry(list.next, struct call_single_data,
					  list);
			list_del(&data->list);

			/*
			 * 'data' can be invalid after this call if
			 * flags == 0 (when called through
			 * generic_exec_single()), so save them away
			 * before making the call.
			 */
			data_flags = data->flags;

			data->func(data->info);

			if (data_flags & CSD_FLAG_WAIT) {
				smp_wmb();
				data->flags &= ~CSD_FLAG_WAIT;
			} else if (data_flags & CSD_FLAG_ALLOC)
				kfree(data);
		}
		/*
		 * See comment on outer loop
		 */
		smp_mb();
	}
}
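/*
 * Illustrative sketch, not part of this file: the arch is expected to wire
 * its IPI vectors so the two generic handlers above run with interrupts
 * disabled. The function below is a hypothetical arch-side handler; the
 * vector name and entry convention vary by architecture.
 */
#if 0	/* example only */
void example_arch_call_function_single_ipi_handler(void)
{
	/* interrupts are already disabled in the IPI entry path */
	generic_smp_call_function_single_interrupt();
}
#endif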
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		struct call_single_data *data = NULL;

		if (!wait) {
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
		}
		if (!data) {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	}

	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
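/*
 * Illustrative sketch, not part of this file: a typical caller passes a
 * fast, non-blocking callback plus a pointer to a result on its own stack,
 * and sets @wait so the stack stays valid until the callback finishes.
 * All example_* names below are hypothetical.
 */
#if 0	/* example only */
struct example_result {
	int cpu;
};

static void example_read_cpu(void *info)
{
	struct example_result *res = info;

	/* runs on the target CPU, in IPI context */
	res->cpu = smp_processor_id();
}

static int example_query_cpu(int cpu)
{
	struct example_result res = { .cpu = -1 };

	/* wait=1: on-stack @res is safe, we block until it is filled in */
	smp_call_function_single(cpu, example_read_cpu, &res, 1);
	return res.cpu;
}
#endif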
/*
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}
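/*
 * Illustrative sketch, not part of this file: embedding the
 * call_single_data in a longer-lived object avoids any allocation on the
 * hot path, which is the embedding use case noted above. The example_*
 * names are hypothetical. With flags == 0 the IPI handler neither clears
 * CSD_FLAG_WAIT nor frees the data, so the caller must not reuse @csd
 * until the remote CPU has processed it.
 */
#if 0	/* example only */
struct example_remote_kick {
	struct call_single_data csd;
};

static void example_kick_fn(void *info)
{
	/* runs on the remote CPU */
}

static void example_kick_cpu(struct example_remote_kick *k, int cpu)
{
	k->csd.func = example_kick_fn;
	k->csd.info = k;
	k->csd.flags = 0;	/* no wait, no kfree: caller owns storage */

	__smp_call_function_single(cpu, &k->csd);
}
#endif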
/*
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	struct call_function_data d;
	struct call_function_data *data = NULL;
	cpumask_t allbutself;
	unsigned long flags;
	int cpu, num_cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	cpu = smp_processor_id();
	allbutself = cpu_online_map;
	cpu_clear(cpu, allbutself);
	cpus_and(mask, mask, allbutself);
	num_cpus = cpus_weight(mask);

	/*
	 * If zero CPUs, return. If just a single CPU, turn this request
	 * into a targeted single call instead since it's faster.
	 */
	if (!num_cpus)
		return 0;
	else if (num_cpus == 1) {
		cpu = first_cpu(mask);
		return smp_call_function_single(cpu, func, info, wait);
	}

	if (!wait) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (data)
			data->csd.flags = CSD_FLAG_ALLOC;
	}
	if (!data) {
		data = &d;
		data->csd.flags = CSD_FLAG_WAIT;
		wait = 1;
	}

	spin_lock_init(&data->lock);
	data->csd.func = func;
	data->csd.info = info;
	data->refs = num_cpus;
	data->cpumask = mask;

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi(mask);

	/* optionally wait for the CPUs to complete */
	if (wait)
		csd_flag_wait(&data->csd);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
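/*
 * Illustrative sketch, not part of this file: building an explicit cpumask
 * and running a callback on just those CPUs. The example_* names are
 * hypothetical; note the preemption-disabled requirement documented above.
 */
#if 0	/* example only */
static void example_poke(void *info)
{
	/* runs in IPI context on every CPU in the mask */
}

static void example_poke_pair(int cpu_a, int cpu_b)
{
	cpumask_t mask;

	cpus_clear(mask);
	cpu_set(cpu_a, mask);
	cpu_set(cpu_b, mask);

	preempt_disable();
	smp_call_function_mask(mask, example_poke, NULL, 1);
	preempt_enable();
}
#endif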
/*
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	int ret;

	preempt_disable();
	ret = smp_call_function_mask(cpu_online_map, func, info, wait);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL(smp_call_function);
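/*
 * Illustrative sketch, not part of this file: the common "flush everywhere"
 * pattern. smp_call_function() covers all *other* online CPUs, so the
 * caller handles the local CPU itself. example_* names are hypothetical.
 */
#if 0	/* example only */
static void example_flush_local(void *unused)
{
	/* invalidate this CPU's private state here */
}

static void example_flush_all(void)
{
	preempt_disable();
	smp_call_function(example_flush_local, NULL, 1);	/* other CPUs */
	example_flush_local(NULL);				/* this CPU */
	preempt_enable();
}
#endif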
void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}