/*
 * padata.c - generic interface to process data streams in parallel
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#define MAX_SEQ_NR (INT_MAX - NR_CPUS)
#define MAX_OBJ_NUM (10000 * NR_CPUS)
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask);

	return target_cpu;
}
static int padata_cpu_hash(struct padata_priv *padata)
{
	int cpu_index;
	struct parallel_data *pd;

	pd = padata->pd;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);

	return padata_index_to_cpu(pd, cpu_index);
}
static void padata_parallel_worker(struct work_struct *work)
{
	struct padata_queue *queue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	queue = container_of(work, struct padata_queue, pwork);
	pd = queue->pd;
	pinst = pd->pinst;

	spin_lock(&queue->parallel.lock);
	list_replace_init(&queue->parallel.list, &local_list);
	spin_unlock(&queue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}
/*
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the cpumask of padata.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference(pinst->pd);

	err = 0;
	if (!(pinst->flags & PADATA_INIT))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = -EINVAL;
	if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
		goto out;

	err = -EINPROGRESS;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
		atomic_set(&pd->seq_nr, -1);

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);

	target_cpu = padata_cpu_hash(padata);
	queue = per_cpu_ptr(pd->queue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->pwork);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
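
/*
 * A minimal usage sketch (not built): how a caller might submit an object
 * with padata_do_parallel.  The names below (my_request, my_parallel,
 * my_serial, my_submit) are hypothetical and not part of the padata API.
 * Note that padata_do_parallel returns -EINPROGRESS when the object was
 * queued successfully.
 */
#if 0
struct my_request {
	struct padata_priv padata;	/* embedded padata control struct */
	void *data;
};

static void my_parallel(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request,
					      padata);

	/* ... do the parallelizable work on req->data, BHs are off ... */

	/* hand the finished object back for in-order completion */
	padata_do_serial(padata);
}

static int my_submit(struct padata_instance *pinst, struct my_request *req,
		     int cb_cpu)
{
	int err;

	req->padata.parallel = my_parallel;
	req->padata.serial = my_serial;	/* defined after padata_do_serial */

	/* cb_cpu must be in the instance's cpumask */
	err = padata_do_parallel(pinst, &req->padata, cb_cpu);

	return err == -EINPROGRESS ? 0 : err;
}
#endif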
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus, empty, calc_seq_nr;
	int seq_nr, next_nr, overrun, next_overrun;
	struct padata_queue *queue, *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	empty = 0;
	next_nr = -1;
	next_overrun = 0;
	next_queue = NULL;

	num_cpus = cpumask_weight(pd->cpumask);

	for_each_cpu(cpu, pd->cpumask) {
		queue = per_cpu_ptr(pd->queue, cpu);
		reorder = &queue->reorder;

		/*
		 * Calculate the seq_nr of the object that should be
		 * next in this queue.
		 */
		overrun = 0;
		calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
			      + queue->cpu_index;

		if (unlikely(calc_seq_nr > pd->max_seq_nr)) {
			calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1;
			overrun = 1;
		}

		if (!list_empty(&reorder->list)) {
			padata = list_entry(reorder->list.next,
					    struct padata_priv, list);

			seq_nr = padata->seq_nr;
			BUG_ON(calc_seq_nr != seq_nr);
		} else {
			seq_nr = calc_seq_nr;
			empty++;
		}

		if (next_nr < 0 || seq_nr < next_nr
		    || (next_overrun && !overrun)) {
			next_nr = seq_nr;
			next_overrun = overrun;
			next_queue = queue;
		}
	}

	padata = NULL;

	if (empty == num_cpus)
		goto out;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		if (unlikely(next_overrun)) {
			for_each_cpu(cpu, pd->cpumask) {
				queue = per_cpu_ptr(pd->queue, cpu);
				atomic_set(&queue->num_obj, 0);
			}
		}

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		atomic_inc(&next_queue->num_obj);

		goto out;
	}

	if (next_nr % num_cpus == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
	struct padata_priv *padata;
	struct padata_queue *queue;
	struct padata_instance *pinst = pd->pinst;

try_again:
	if (!spin_trylock_bh(&pd->lock))
		goto out;

	while (1) {
		padata = padata_get_next(pd);

		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		if (PTR_ERR(padata) == -ENODATA) {
			spin_unlock_bh(&pd->lock);
			goto out;
		}

		queue = per_cpu_ptr(pd->queue, padata->cb_cpu);

		spin_lock(&queue->serial.lock);
		list_add_tail(&padata->list, &queue->serial.list);
		spin_unlock(&queue->serial.lock);

		queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
	}

	spin_unlock_bh(&pd->lock);

	if (atomic_read(&pd->reorder_objects))
		goto try_again;

out:
	return;
}
static void padata_serial_worker(struct work_struct *work)
{
	struct padata_queue *queue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	queue = container_of(work, struct padata_queue, swork);
	pd = queue->pd;

	spin_lock(&queue->serial.lock);
	list_replace_init(&queue->serial.list, &local_list);
	spin_unlock(&queue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}

	local_bh_enable();
}
/*
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_queue *queue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	queue = per_cpu_ptr(pd->queue, cpu);

	spin_lock(&queue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &queue->reorder.list);
	spin_unlock(&queue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
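
/*
 * Continuing the earlier sketch (not built): the serial callback that the
 * hypothetical my_parallel above hands its object to via padata_do_serial.
 * It runs on the cb_cpu passed to padata_do_parallel, and objects arrive
 * in their original submission order.
 */
#if 0
static void my_serial(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request,
					      padata);

	/* complete the request in order; hypothetical completion helper */
	complete_my_request(req);
}
#endif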
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *cpumask)
{
	int cpu, cpu_index, num_cpus;
	struct padata_queue *queue;
	struct parallel_data *pd;

	cpu_index = 0;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->queue = alloc_percpu(struct padata_queue);
	if (!pd->queue)
		goto err_free_pd;

	if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
		goto err_free_queue;

	for_each_possible_cpu(cpu) {
		queue = per_cpu_ptr(pd->queue, cpu);

		queue->pd = pd;

		if (cpumask_test_cpu(cpu, cpumask)
		    && cpumask_test_cpu(cpu, cpu_active_mask)) {
			queue->cpu_index = cpu_index;
			cpu_index++;
		} else
			queue->cpu_index = -1;

		INIT_LIST_HEAD(&queue->reorder.list);
		INIT_LIST_HEAD(&queue->parallel.list);
		INIT_LIST_HEAD(&queue->serial.list);
		spin_lock_init(&queue->reorder.lock);
		spin_lock_init(&queue->parallel.lock);
		spin_lock_init(&queue->serial.lock);

		INIT_WORK(&queue->pwork, padata_parallel_worker);
		INIT_WORK(&queue->swork, padata_serial_worker);
		atomic_set(&queue->num_obj, 0);
	}

	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);

	num_cpus = cpumask_weight(pd->cpumask);
	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;

	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_queue:
	free_percpu(pd->queue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask);
	free_percpu(pd->queue);
	kfree(pd);
}
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	while (atomic_read(&pd_old->refcnt) != 0)
		yield();

	flush_workqueue(pinst->wq);

	padata_free_pd(pd_old);

	pinst->flags &= ~PADATA_RESET;
}
/*
 * padata_set_cpumask - set the cpumask that padata should use
 *
 * @pinst: padata instance
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst,
		       cpumask_var_t cpumask)
{
	struct parallel_data *pd;
	int err = 0;

	might_sleep();

	mutex_lock(&pinst->lock);

	pd = padata_alloc_pd(pinst, cpumask);
	if (!pd) {
		err = -ENOMEM;
		goto out;
	}

	cpumask_copy(pinst->cpumask, cpumask);

	padata_replace(pinst, pd);

out:
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
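
/*
 * Sketch (not built): restricting a running instance to a caller-chosen
 * set of cpus.  The function name is hypothetical; the cpumask_var_t
 * handling mirrors what padata_set_cpumask expects.
 */
#if 0
static int my_use_two_cpus(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);

	err = padata_set_cpumask(pinst, mask);

	free_cpumask_var(mask);
	return err;
}
#endif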
static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

	return 0;
}
/*
 * padata_add_cpu - add a cpu to the padata cpumask
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err;

	might_sleep();

	mutex_lock(&pinst->lock);

	cpumask_set_cpu(cpu, pinst->cpumask);
	err = __padata_add_cpu(pinst, cpu);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

	return 0;
}
/*
 * padata_remove_cpu - remove a cpu from the padata cpumask
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err;

	might_sleep();

	mutex_lock(&pinst->lock);

	cpumask_clear_cpu(cpu, pinst->cpumask);
	err = __padata_remove_cpu(pinst, cpu);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
/*
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
void padata_start(struct padata_instance *pinst)
{
	might_sleep();

	mutex_lock(&pinst->lock);
	pinst->flags |= PADATA_INIT;
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_start);
/*
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	might_sleep();

	mutex_lock(&pinst->lock);
	pinst->flags &= ~PADATA_INIT;
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return NOTIFY_BAD;
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return NOTIFY_BAD;
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		__padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		break;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		__padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		break;
	}

	return NOTIFY_OK;
}
/*
 * padata_alloc - allocate and initialize a padata instance
 *
 * @cpumask: cpumask that padata uses for parallelization
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc(const struct cpumask *cpumask,
				     struct workqueue_struct *wq)
{
	int err;
	struct padata_instance *pinst;
	struct parallel_data *pd;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pd = padata_alloc_pd(pinst, cpumask);
	if (!pd)
		goto err_free_inst;

	if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL))
		goto err_free_pd;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask, cpumask);

	pinst->flags = 0;

	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	err = register_hotcpu_notifier(&pinst->cpu_notifier);
	if (err)
		goto err_free_cpumask;

	mutex_init(&pinst->lock);

	return pinst;

err_free_cpumask:
	free_cpumask_var(pinst->cpumask);
err_free_pd:
	padata_free_pd(pd);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
/*
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	padata_stop(pinst);

	synchronize_rcu();

	while (atomic_read(&pinst->pd->refcnt) != 0)
		yield();

	unregister_hotcpu_notifier(&pinst->cpu_notifier);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask);
	kfree(pinst);
}
EXPORT_SYMBOL(padata_free);
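
/*
 * End-to-end lifecycle sketch (not built): a hypothetical user creates a
 * dedicated workqueue, allocates and starts an instance, submits work via
 * padata_do_parallel (see my_submit above), then tears everything down.
 * All my_* names are illustrative.
 */
#if 0
static struct padata_instance *my_pinst;
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	my_wq = create_workqueue("my_padata_wq");
	if (!my_wq)
		return -ENOMEM;

	my_pinst = padata_alloc(cpu_active_mask, my_wq);
	if (!my_pinst) {
		destroy_workqueue(my_wq);
		return -ENOMEM;
	}

	padata_start(my_pinst);
	return 0;
}

static void __exit my_exit(void)
{
	padata_stop(my_pinst);
	padata_free(my_pinst);
	destroy_workqueue(my_wq);
}
#endif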