/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");
static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
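
/*
 * Each rcu_torture element cycles between the freelist and publication
 * via rcu_torture_current.  Once replaced by the writer, the element's
 * rtort_pipe_count is incremented once per grace period; a reader that
 * observes an element with a count greater than 1 has caught a grace
 * period ending while the element was still reachable.  The element
 * returns to the freelist after RCU_TORTURE_PIPE_LEN grace periods.
 */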
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_timers = 0;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test = 0;
#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
DEFINE_MUTEX(fullstop_mutex);	/* Protect fullstop transitions and spawning */
/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}
/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state +=
			(unsigned long)cpu_clock(raw_smp_processor_id());
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
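
/*
 * A minimal usage sketch (illustrative only); callers reduce the raw
 * value modulo some bound, as the read-delay functions below do:
 *
 *	static DEFINE_RCU_RANDOM(rand);
 *	long delay = rcu_random(&rand) % (nrealreaders * 2 * longdelay);
 *
 * The swahw32() halfword swap moves the better-mixed high-order bits
 * of the linear congruential state into the low-order bits that the
 * modulus operation actually consumes.
 */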
static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readdelay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferredfree)(struct rcu_torture *p);
	void (*sync)(void);
	void (*cb_barrier)(void);
	int (*stats)(char *page);
	int irqcapable;
	char *name;
};

static struct rcu_torture_ops *cur_ops = NULL;
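
/*
 * Adding a new flavor to the test means filling in another ops vector
 * and listing it in torture_ops[] in rcu_torture_init().  A purely
 * hypothetical sketch (the foo_* functions do not exist; only the
 * rcu, rcu_bh, srcu, and sched variants below are implemented):
 *
 *	static struct rcu_torture_ops foo_ops = {
 *		.init		= rcu_sync_torture_init,
 *		.readlock	= foo_torture_read_lock,
 *		.readdelay	= rcu_read_delay,
 *		.readunlock	= foo_torture_read_unlock,
 *		.completed	= foo_torture_completed,
 *		.deferredfree	= rcu_sync_torture_deferred_free,
 *		.sync		= synchronize_foo,
 *		.name		= "foo"
 *	};
 */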
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long longdelay = 200;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
	if (!delay)
		udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.cb_barrier = rcu_barrier,
	.stats = NULL,
	.irqcapable = 1,
	.name = "rcu"
};
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu,
	.cb_barrier = NULL,
	.stats = NULL,
	.irqcapable = 1,
	.name = "rcu_sync"
};
/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}
struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
}
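
/*
 * rcu_bh_torture_synchronize() above open-codes a synchronous
 * grace-period wait: call_rcu_bh() queues a callback that cannot be
 * invoked until a full rcu_bh grace period has elapsed, and that
 * callback completes the on-stack completion the caller sleeps on.
 * The same pattern can supply a sync() method for any flavor that
 * only provides a call_*() primitive.
 */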
static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = rcu_barrier_bh,
	.stats = NULL,
	.irqcapable = 1,
	.name = "rcu_bh"
};
static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = NULL,
	.stats = NULL,
	.irqcapable = 1,
	.name = "rcu_bh_sync"
};
/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}
static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.readdelay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.cb_barrier = NULL,
	.stats = srcu_torture_stats,
	.name = "srcu"
};
/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static int sched_torture_completed(void)
{
	return 0;
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sched_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = rcu_barrier_sched,
	.stats = NULL,
	.irqcapable = 1,
	.name = "sched"
};
static struct rcu_torture_ops sched_ops_sync = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = NULL,
	.stats = NULL,
	.name = "sched_sync"
};
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
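
/*
 * Lifecycle of one element, for reference: rcu_torture_alloc() takes it
 * off the freelist; rcu_assign_pointer() publishes it as
 * rcu_torture_current; the next writer pass hands it to
 * cur_ops->deferredfree(); rcu_torture_cb() then re-queues it once per
 * grace period, bumping rtort_pipe_count, until the count reaches
 * RCU_TORTURE_PIPE_LEN and the element is freed back to the pool.
 */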
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference(rcu_torture_current);
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->readdelay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	++__get_cpu_var(rcu_torture_count)[pipe_count];
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	++__get_cpu_var(rcu_torture_batch)[completed];
	preempt_enable();
	cur_ops->readunlock(idx);
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irqcapable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irqcapable) {
			if (!timer_pending(&t))
				mod_timer(&t, 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irqcapable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d nt: %ld",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_timers);
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
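
/*
 * The resulting message looks something like this (format per the
 * sprintf() calls above; the values shown are illustrative):
 *
 *	rcu-torture: rtc: f0000000 ver: 1967 tfle: 0 rta: 1977 rtaf: 0
 *		rtf: 1911 rtmbe: 0 nt: 3055767
 *	rcu-torture: Reader Pipe:  727860534 34213 0 0 0 0 0 0 0 0 0
 *	rcu-torture: Reader Batch:  727877838 17003 0 0 0 0 0 0 0 0 0
 *	rcu-torture: Free-Block Circulation:  1976 1965 1954 ...
 *
 * Nonzero "Reader Pipe" entries past the second column, or a "!!!"
 * marker, mean a reader saw an element survive beyond one grace
 * period, i.e. a broken RCU implementation.
 */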
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}
static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}
/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}
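
/*
 * With the default stutter=5, all readers, writers, and fakewriters
 * run flat out for five seconds, idle for five seconds, and so on,
 * repeatedly exercising the transition between an idle and a fully
 * loaded system.  Setting stutter=0 makes both sleeps above return
 * immediately, so the pause is effectively disabled and the test runs
 * at full load throughout.
 */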
static void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
		"--- %s: nreaders=%d nfakewriters=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval=%d stutter=%d irqreader=%d\n",
		torture_type, tag, nrealreaders, nfakewriters,
		stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		stutter, irqreader);
}
static struct notifier_block rcutorture_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};
static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_nb);
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &sched_ops, &sched_ops_sync, };

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
		       torture_type);
		mutex_unlock(&fullstop_mutex);
		return (-EINVAL);
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					  "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}

	/* Create the stutter thread */
	stutter_task = kthread_run(rcu_torture_stutter, NULL,
				  "rcu_torture_stutter");
	if (IS_ERR(stutter_task)) {
		firsterr = PTR_ERR(stutter_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
		stutter_task = NULL;
		goto unwind;
	}
	register_reboot_notifier(&rcutorture_nb);
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);