/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	  Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");

MODULE_ALIAS("rcutorture");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutorture."

static int fqs_duration;
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable");
static int fqs_holdoff;
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
static int fqs_stutter = 3;
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
static bool gp_exp;
module_param(gp_exp, bool, 0444);
MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives");
static bool gp_normal;
module_param(gp_normal, bool, 0444);
MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives");
static int irqreader = 1;
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
static int n_barrier_cbs;
module_param(n_barrier_cbs, int, 0444);
MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing");
static int nfakewriters = 4;
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
static int nreaders = -1;
module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
static int object_debug;
module_param(object_debug, int, 0444);
MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing");
static int onoff_holdoff;
module_param(onoff_holdoff, int, 0444);
MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
static int onoff_interval;
module_param(onoff_interval, int, 0444);
MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
static int shuffle_interval = 3;
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
static int shutdown_secs;
module_param(shutdown_secs, int, 0444);
MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable.");
static int stall_cpu;
module_param(stall_cpu, int, 0444);
MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
static int stall_cpu_holdoff = 10;
module_param(stall_cpu_holdoff, int, 0444);
MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
static int stat_interval = 60;
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
static int stutter = 5;
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
static int test_boost = 1;
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
static int test_boost_duration = 4;
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
static int test_boost_interval = 7;
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
static bool test_no_idle_hz = true;
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
static bool verbose;
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *onoff_task;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;
static long n_barrier_attempts;
static long n_barrier_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(rcutorture_runnable, int, 0444);
MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();
	unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);

	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;

/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);

/* Forward reference. */
static void rcu_torture_cleanup(void);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		pr_warn(/* but going down anyway, so... */
			"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(const char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice(
			"rcutorture thread %s parking due to system shutdown\n",
			title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
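
/*
 * Illustrative sketch (added for clarity, not in the original file): the
 * heart of rcu_random() is the linear-congruential step below; swahw32()
 * swaps the 16-bit halves of the low 32 bits so that callers taking
 * "% small_number" see the better-mixed high-order bits.  Kept under #if 0
 * so it is never compiled.
 */
#if 0
static unsigned long rcu_random_step(unsigned long state)
{
	state = state * RCU_RANDOM_MULT + RCU_RANDOM_ADD; /* LCG step. */
	return swahw32(state);	/* Swap 16-bit halfwords of the low 32 bits. */
}
#endif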

static void
rcu_stutter_wait(const char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	int (*readlock)(void);
	void (*read_delay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
	void (*cb_barrier)(void);
	void (*fqs)(void);
	int (*stats)(char *page);
	int irq_capable;
	int can_boost;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
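
/*
 * Sketch (illustrative, not in the original file): a new RCU flavor is
 * added by defining its ops vector and listing it in the torture_ops[]
 * array in rcu_torture_init(), which selects the entry whose ->name matches
 * the torture_type module parameter, e.g.:
 *
 *	static struct rcu_torture_ops foo_ops = {
 *		.init		= rcu_sync_torture_init,
 *		.readlock	= foo_torture_read_lock,
 *		.readunlock	= foo_torture_read_unlock,
 *		.sync		= synchronize_foo,
 *		.name		= "foo"
 *	};
 *
 * Here "foo" and the foo_* functions are hypothetical placeholders.
 */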

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}
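
/*
 * Worked example for the probabilities above (illustrative, added for
 * clarity): with a hypothetical nrealreaders of 8, the 50 ms mdelay() fires
 * on roughly one call in 8 * 2000 * 50 = 800,000, the 200 us udelay() on
 * roughly one call in 8 * 2 * 200 = 3,200, and the preempt_schedule() on
 * roughly one call in 8 * 20000 = 160,000, since each test passes only when
 * rcu_random() is an exact multiple of the corresponding modulus.
 */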

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static void srcu_torture_call(struct rcu_head *head,
			      void (*func)(struct rcu_head *head))
{
	call_srcu(&srcu_ctl, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%lu,%lu)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_PRINTK_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
	rcutorture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}
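
/*
 * Note on the failure criterion above (descriptive comment added for
 * clarity): when the previously posted callback has finally been invoked,
 * the elapsed time since it was posted (call_rcu_time) is compared against
 * test_boost_duration * HZ - HZ / 2.  Taking longer than that, i.e. within
 * roughly half a second of the full boost interval, is counted in
 * n_rcu_torture_boost_failure as evidence that priority boosting failed to
 * get the callback invoked promptly despite the real-time spinner.
 */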

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool exp;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			if (gp_normal == gp_exp)
				exp = !!(rcu_random(&rand) & 0x80);
			else
				exp = gp_exp;
			if (!exp) {
				cur_ops->deferred_free(old_rp);
			} else {
				cur_ops->exp_sync();
				list_add(&old_rp->rtort_free,
					 &rcu_torture_removed);
				list_for_each_entry_safe(rp, rp1,
							 &rcu_torture_removed,
							 rtort_free) {
					i = rp->rtort_pipe_count;
					if (i > RCU_TORTURE_PIPE_LEN)
						i = RCU_TORTURE_PIPE_LEN;
					atomic_inc(&rcu_torture_wcount[i]);
					if (++rp->rtort_pipe_count >=
					    RCU_TORTURE_PIPE_LEN) {
						rp->rtort_mbtest = 0;
						list_del(&rp->rtort_free);
						rcu_torture_free(rp);
					}
				}
			}
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    rcu_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (rcu_random(&rand) & 0x80)
				cur_ops->sync();
			else
				cur_ops->exp_sync();
		} else if (gp_normal) {
			cur_ops->sync();
		} else {
			cur_ops->exp_sync();
		}
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

	if (atomic_read(&beenhere))
		return;
	if (atomic_xchg(&beenhere, 1) != 0)
		return;
	ftrace_dump(DUMP_ALL);
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	int completed_end;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;
	unsigned long long ts;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed_end = cur_ops->completed();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
					  completed, completed_end);
		rcutorture_trace_dump();
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = completed_end - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int completed_end;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;
	unsigned long long ts;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		ts = rcu_trace_clock_local();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		completed_end = cur_ops->completed();
		if (pipe_count > 1) {
			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
						  ts, completed, completed_end);
			rcutorture_trace_dump();
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = completed_end - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		cur_ops->readunlock(idx);
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free));
	cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror);
	cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers);
	cnt += sprintf(&page[cnt],
		       "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		       n_online_successes, n_online_attempts,
		       n_offline_successes, n_offline_attempts,
		       min_online, max_online,
		       min_offline, max_offline,
		       sum_online, sum_offline, HZ);
	cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld",
		       n_barrier_successes,
		       n_barrier_attempts,
		       n_rcu_torture_barrier_error);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	pr_alert("%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1)
		return;

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}
	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}
	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
	if (stutter_task)
		set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
	if (fqs_task)
		set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
	if (shutdown_task)
		set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task)
		set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
	if (stall_task)
		set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
	if (barrier_cbs_tasks)
		for (i = 0; i < n_barrier_cbs; i++)
			if (barrier_cbs_tasks[i])
				set_cpus_allowed_ptr(barrier_cbs_tasks[i],
						     shuffle_tmp_mask);
	if (barrier_task)
		set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static struct notifier_block rcutorture_shutdown_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};

static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
	boost_tasks[cpu] = NULL;
}

static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * Cause the rcutorture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs module parameter.
 */
static int
rcu_torture_shutdown(void *arg)
{
	long delta;
	unsigned long jiffies_snap;

	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
	jiffies_snap = ACCESS_ONCE(jiffies);
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !kthread_should_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "rcu_torture_shutdown task: %lu jiffies remaining\n",
				 torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = ACCESS_ONCE(jiffies);
	}
	if (kthread_should_stop()) {
		VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	rcu_torture_cleanup();	/* Get the success/failure message. */
	kernel_power_off();	/* Shut down the system. */
	return -EINVAL;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
rcu_torture_onoff(void *arg)
{
	int cpu;
	unsigned long delta;
	int maxcpu = -1;
	DEFINE_RCU_RANDOM(rand);
	int ret;
	unsigned long starttime;

	VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (onoff_holdoff > 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff * HZ);
		VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff");
	}
	while (!kthread_should_stop()) {
		cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "rcu_torture_onoff task: offlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_offline_attempts++;
			ret = cpu_down(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: offline %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: offlined %d\n",
						 torture_type, cpu);
				n_offline_successes++;
				delta = jiffies - starttime;
				sum_offline += delta;
				if (min_offline < 0) {
					min_offline = delta;
					max_offline = delta;
				}
				if (min_offline > delta)
					min_offline = delta;
				if (max_offline < delta)
					max_offline = delta;
			}
		} else if (cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "rcu_torture_onoff task: onlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_online_attempts++;
			ret = cpu_up(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: online %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: onlined %d\n",
						 torture_type, cpu);
				n_online_successes++;
				delta = jiffies - starttime;
				sum_online += delta;
				if (min_online < 0) {
					min_online = delta;
					max_online = delta;
				}
				if (min_online > delta)
					min_online = delta;
				if (max_online < delta)
					max_online = delta;
			}
		}
		schedule_timeout_interruptible(onoff_interval * HZ);
	}
	VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
	return 0;
}

static int
rcu_torture_onoff_init(void)
{
	int ret;

	if (onoff_interval <= 0)
		return 0;
	onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
	if (IS_ERR(onoff_task)) {
		ret = PTR_ERR(onoff_task);
		onoff_task = NULL;
		return ret;
	}
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
	if (onoff_task == NULL)
		return;
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static int
rcu_torture_onoff_init(void)
{
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_PRINTK_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		pr_alert("rcu_torture_stall start.\n");
		rcu_read_lock();
		preempt_disable();
		while (ULONG_CMP_LT(get_seconds(), stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	rcutorture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	int ret;

	if (stall_cpu <= 0)
		return 0;
	stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
	if (IS_ERR(stall_task)) {
		ret = PTR_ERR(stall_task);
		stall_task = NULL;
		return ret;
	}
	return 0;
}

/* Clean up after the CPU-stall kthread, if one was spawned. */
static void rcu_torture_stall_cleanup(void)
{
	if (stall_task == NULL)
		return;
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall_task.");
	kthread_stop(stall_task);
	stall_task = NULL;
}

/* Callback function for RCU barrier testing. */
void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = 0;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, 19);
	do {
		wait_event(barrier_cbs_wq[myid],
			   barrier_phase != lastphase ||
			   kthread_should_stop() ||
			   fullstop != FULLSTOP_DONTSTOP);
		lastphase = barrier_phase;
		smp_mb(); /* ensure barrier_phase load before ->call(). */
		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
			break;
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(1);
	cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_PRINTK_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		smp_mb(); /* Ensure barrier_phase after prior assignments. */
		barrier_phase = !barrier_phase;
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   kthread_should_stop() ||
			   fullstop != FULLSTOP_DONTSTOP);
		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier();
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
		}
		n_barrier_successes++;
		schedule_timeout_interruptible(HZ / 10);
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
	rcutorture_shutdown_absorb("rcu_torture_barrier");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(1);
	return 0;
}
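
/*
 * Summary of the barrier-test protocol above (descriptive comment added for
 * clarity): each of the n_barrier_cbs helper kthreads posts exactly one
 * callback per test phase; the driver flips barrier_phase, waits for
 * barrier_cbs_count to drain to zero, and then invokes cur_ops->cb_barrier().
 * If fewer than n_barrier_cbs callbacks have incremented barrier_cbs_invoked
 * by the time the barrier returns, the barrier primitive failed to wait for
 * all pending callbacks and n_rcu_torture_barrier_error is incremented.
 */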

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs == 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
			GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs,
						   (void *)(long)i,
						   "rcu_torture_barrier_cbs");
		if (IS_ERR(barrier_cbs_tasks[i])) {
			ret = PTR_ERR(barrier_cbs_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier_cbs");
			barrier_cbs_tasks[i] = NULL;
			return ret;
		}
	}
	barrier_task = kthread_run(rcu_torture_barrier, NULL,
				   "rcu_torture_barrier");
	if (IS_ERR(barrier_task)) {
		ret = PTR_ERR(barrier_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier");
		barrier_task = NULL;
		return ret;
	}
	return 0;
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	if (barrier_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier task");
		kthread_stop(barrier_task);
		barrier_task = NULL;
	}
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++) {
			if (barrier_cbs_tasks[i] != NULL) {
				VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier_cbs task");
				kthread_stop(barrier_cbs_tasks[i]);
				barrier_cbs_tasks[i] = NULL;
			}
		}
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};

static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	rcutorture_record_test_transition();
	if (fullstop == FULLSTOP_SHUTDOWN) {
		pr_warn(/* but going down anyway, so... */
			"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_shutdown_nb);
	rcu_torture_barrier_cleanup();
	rcu_torture_stall_cleanup();
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}
	if (shutdown_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
	rcu_torture_onoff_cleanup();

	/* Wait for all RCU callbacks to fire.  */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (n_online_successes != n_online_attempts ||
		 n_offline_successes != n_offline_attempts)
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("rcutorture: duplicated callback was invoked.\n");
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	int retval;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
	};

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_create(rcu_torture_writer, NULL,
				     "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	wake_up_process(writer_task);
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	if (shutdown_secs > 0) {
		shutdown_time = jiffies + shutdown_secs * HZ;
		shutdown_task = kthread_create(rcu_torture_shutdown, NULL,
					       "rcu_torture_shutdown");
		if (IS_ERR(shutdown_task)) {
			firsterr = PTR_ERR(shutdown_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
			shutdown_task = NULL;
			goto unwind;
		}
		wake_up_process(shutdown_task);
	}
	i = rcu_torture_onoff_init();
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	register_reboot_notifier(&rcutorture_shutdown_nb);
	i = rcu_torture_stall_init();
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	retval = rcu_torture_barrier_init();
	if (retval != 0) {
		firsterr = retval;
		goto unwind;
	}
	if (object_debug)
		rcu_test_debug_objects();
	rcutorture_record_test_transition();
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);