/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also: Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");
static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
static int fullstop = 0;	/* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD 479001701  /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
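
/*
 * Typical usage, as in the kthreads below: declare a per-thread state
 * with DEFINE_RCU_RANDOM(rand), then draw pseudo-random values from it,
 * for example delaying 0-1023 microseconds via
 * udelay(rcu_random(&rand) & 0x3ff).
 */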
/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state +=
			(unsigned long)cpu_clock(raw_smp_processor_id());
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readdelay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferredfree)(struct rcu_torture *p);
	void (*sync)(void);
	int (*stats)(char *page);
	char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;
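
/*
 * To torture an additional RCU flavor, define a struct rcu_torture_ops
 * filled in with that flavor's read-side and update-side primitives,
 * give it a unique .name, and add a pointer to it to the torture_ops[]
 * array in rcu_torture_init() below; it can then be selected at
 * module-load time via the torture_type parameter.
 */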
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long longdelay = 200;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
	if (!delay)
		udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.stats = NULL,
	.name = "rcu"
};
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu,
	.stats = NULL,
	.name = "rcu_sync"
};
/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}
struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
}
static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.stats = NULL,
	.name = "rcu_bh"
};
static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.stats = NULL,
	.name = "rcu_bh_sync"
};
/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}
static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}
static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.readdelay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.stats = srcu_torture_stats,
	.name = "srcu"
};
/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static int sched_torture_completed(void)
{
	return 0;
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.stats = NULL,
	.name = "sched"
};
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		if (old_rp != NULL) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
	} while (!kthread_should_stop() && !fullstop);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);

	do {
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror));
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
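
/*
 * In the resulting message, the "Reader Pipe" line counts how many
 * pipeline stages deep readers observed elements: with a correct RCU
 * implementation only the first two entries should ever be nonzero,
 * which is exactly what the "if (i > 1)" check above flags with "!!!"
 * and n_rcu_torture_error.
 */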
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}
static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	cpumask_t tmp_mask;
	int i;

	cpus_setall(tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpu_clear(rcu_idle_cpu, tmp_mask);

	set_cpus_allowed_ptr(current, &tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     &tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     &tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, &tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, &tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}
static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
		"--- %s: nreaders=%d nfakewriters=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval = %d\n",
		torture_type, tag, nrealreaders, nfakewriters,
		stat_interval, verbose, test_no_idle_hz, shuffle_interval);
}
static void
rcu_torture_cleanup(void)
{
	int i;

	fullstop = 1;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */
	rcu_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &sched_ops, };

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
		       torture_type);
		return (-EINVAL);
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = 0;
	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}
	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
= kzalloc(nrealreaders
* sizeof(reader_tasks
[0]),
952 if (reader_tasks
== NULL
) {
953 VERBOSE_PRINTK_ERRSTRING("out of memory");
957 for (i
= 0; i
< nrealreaders
; i
++) {
958 VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
959 reader_tasks
[i
] = kthread_run(rcu_torture_reader
, NULL
,
960 "rcu_torture_reader");
961 if (IS_ERR(reader_tasks
[i
])) {
962 firsterr
= PTR_ERR(reader_tasks
[i
]);
963 VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
964 reader_tasks
[i
] = NULL
;
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;
		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	return 0;

unwind:
	rcu_torture_cleanup();
	return firsterr;
}
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);