Commit d6ad67112a78623025632865d716b2f7645874c5

Authored by Josh Triplett
Committed by Linus Torvalds
1 parent 5efee174f8

[PATCH] Publish rcutorture module parameters via sysfs, read-only

rcutorture's module parameters currently use permissions of 0, so they
don't show up in /sys/module/rcutorture/parameters.  Change the permissions
on all module parameters to world-readable (0444).

rcutorture does all of its initialization and thread startup when loaded
and relies on the parameters not changing during execution, so they should
not permit writing.  However, reading seems fine.

Signed-off-by: Josh Triplett <josh@freedesktop.org>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 7 additions and 7 deletions. (Inline diff view follows; each line shows the old and new column side by side.)

1 /* 1 /*
2 * Read-Copy Update module-based torture test facility 2 * Read-Copy Update module-based torture test facility
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 * 17 *
18 * Copyright (C) IBM Corporation, 2005, 2006 18 * Copyright (C) IBM Corporation, 2005, 2006
19 * 19 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 * Josh Triplett <josh@freedesktop.org> 21 * Josh Triplett <josh@freedesktop.org>
22 * 22 *
23 * See also: Documentation/RCU/torture.txt 23 * See also: Documentation/RCU/torture.txt
24 */ 24 */
25 #include <linux/types.h> 25 #include <linux/types.h>
26 #include <linux/kernel.h> 26 #include <linux/kernel.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/kthread.h> 29 #include <linux/kthread.h>
30 #include <linux/err.h> 30 #include <linux/err.h>
31 #include <linux/spinlock.h> 31 #include <linux/spinlock.h>
32 #include <linux/smp.h> 32 #include <linux/smp.h>
33 #include <linux/rcupdate.h> 33 #include <linux/rcupdate.h>
34 #include <linux/interrupt.h> 34 #include <linux/interrupt.h>
35 #include <linux/sched.h> 35 #include <linux/sched.h>
36 #include <asm/atomic.h> 36 #include <asm/atomic.h>
37 #include <linux/bitops.h> 37 #include <linux/bitops.h>
38 #include <linux/module.h> 38 #include <linux/module.h>
39 #include <linux/completion.h> 39 #include <linux/completion.h>
40 #include <linux/moduleparam.h> 40 #include <linux/moduleparam.h>
41 #include <linux/percpu.h> 41 #include <linux/percpu.h>
42 #include <linux/notifier.h> 42 #include <linux/notifier.h>
43 #include <linux/cpu.h> 43 #include <linux/cpu.h>
44 #include <linux/random.h> 44 #include <linux/random.h>
45 #include <linux/delay.h> 45 #include <linux/delay.h>
46 #include <linux/byteorder/swabb.h> 46 #include <linux/byteorder/swabb.h>
47 #include <linux/stat.h> 47 #include <linux/stat.h>
48 #include <linux/srcu.h> 48 #include <linux/srcu.h>
49 49
50 MODULE_LICENSE("GPL"); 50 MODULE_LICENSE("GPL");
51 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and " 51 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
52 "Josh Triplett <josh@freedesktop.org>"); 52 "Josh Triplett <josh@freedesktop.org>");
53 53
54 static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ 54 static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
55 static int nfakewriters = 4; /* # fake writer threads */ 55 static int nfakewriters = 4; /* # fake writer threads */
56 static int stat_interval; /* Interval between stats, in seconds. */ 56 static int stat_interval; /* Interval between stats, in seconds. */
57 /* Defaults to "only at end of test". */ 57 /* Defaults to "only at end of test". */
58 static int verbose; /* Print more debug info. */ 58 static int verbose; /* Print more debug info. */
59 static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ 59 static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
60 static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ 60 static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
61 static char *torture_type = "rcu"; /* What RCU implementation to torture. */ 61 static char *torture_type = "rcu"; /* What RCU implementation to torture. */
62 62
63 module_param(nreaders, int, 0); 63 module_param(nreaders, int, 0444);
64 MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); 64 MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
65 module_param(nfakewriters, int, 0); 65 module_param(nfakewriters, int, 0444);
66 MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); 66 MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
67 module_param(stat_interval, int, 0); 67 module_param(stat_interval, int, 0444);
68 MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); 68 MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
69 module_param(verbose, bool, 0); 69 module_param(verbose, bool, 0444);
70 MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); 70 MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
71 module_param(test_no_idle_hz, bool, 0); 71 module_param(test_no_idle_hz, bool, 0444);
72 MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); 72 MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
73 module_param(shuffle_interval, int, 0); 73 module_param(shuffle_interval, int, 0444);
74 MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); 74 MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
75 module_param(torture_type, charp, 0); 75 module_param(torture_type, charp, 0444);
76 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); 76 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
77 77
78 #define TORTURE_FLAG "-torture:" 78 #define TORTURE_FLAG "-torture:"
79 #define PRINTK_STRING(s) \ 79 #define PRINTK_STRING(s) \
80 do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0) 80 do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
81 #define VERBOSE_PRINTK_STRING(s) \ 81 #define VERBOSE_PRINTK_STRING(s) \
82 do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0) 82 do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
83 #define VERBOSE_PRINTK_ERRSTRING(s) \ 83 #define VERBOSE_PRINTK_ERRSTRING(s) \
84 do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0) 84 do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
85 85
86 static char printk_buf[4096]; 86 static char printk_buf[4096];
87 87
88 static int nrealreaders; 88 static int nrealreaders;
89 static struct task_struct *writer_task; 89 static struct task_struct *writer_task;
90 static struct task_struct **fakewriter_tasks; 90 static struct task_struct **fakewriter_tasks;
91 static struct task_struct **reader_tasks; 91 static struct task_struct **reader_tasks;
92 static struct task_struct *stats_task; 92 static struct task_struct *stats_task;
93 static struct task_struct *shuffler_task; 93 static struct task_struct *shuffler_task;
94 94
95 #define RCU_TORTURE_PIPE_LEN 10 95 #define RCU_TORTURE_PIPE_LEN 10
96 96
97 struct rcu_torture { 97 struct rcu_torture {
98 struct rcu_head rtort_rcu; 98 struct rcu_head rtort_rcu;
99 int rtort_pipe_count; 99 int rtort_pipe_count;
100 struct list_head rtort_free; 100 struct list_head rtort_free;
101 int rtort_mbtest; 101 int rtort_mbtest;
102 }; 102 };
103 103
104 static int fullstop = 0; /* stop generating callbacks at test end. */ 104 static int fullstop = 0; /* stop generating callbacks at test end. */
105 static LIST_HEAD(rcu_torture_freelist); 105 static LIST_HEAD(rcu_torture_freelist);
106 static struct rcu_torture *rcu_torture_current = NULL; 106 static struct rcu_torture *rcu_torture_current = NULL;
107 static long rcu_torture_current_version = 0; 107 static long rcu_torture_current_version = 0;
108 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 108 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
109 static DEFINE_SPINLOCK(rcu_torture_lock); 109 static DEFINE_SPINLOCK(rcu_torture_lock);
110 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = 110 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
111 { 0 }; 111 { 0 };
112 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = 112 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
113 { 0 }; 113 { 0 };
114 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 114 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
115 static atomic_t n_rcu_torture_alloc; 115 static atomic_t n_rcu_torture_alloc;
116 static atomic_t n_rcu_torture_alloc_fail; 116 static atomic_t n_rcu_torture_alloc_fail;
117 static atomic_t n_rcu_torture_free; 117 static atomic_t n_rcu_torture_free;
118 static atomic_t n_rcu_torture_mberror; 118 static atomic_t n_rcu_torture_mberror;
119 static atomic_t n_rcu_torture_error; 119 static atomic_t n_rcu_torture_error;
120 static struct list_head rcu_torture_removed; 120 static struct list_head rcu_torture_removed;
121 121
122 /* 122 /*
123 * Allocate an element from the rcu_tortures pool. 123 * Allocate an element from the rcu_tortures pool.
124 */ 124 */
125 static struct rcu_torture * 125 static struct rcu_torture *
126 rcu_torture_alloc(void) 126 rcu_torture_alloc(void)
127 { 127 {
128 struct list_head *p; 128 struct list_head *p;
129 129
130 spin_lock_bh(&rcu_torture_lock); 130 spin_lock_bh(&rcu_torture_lock);
131 if (list_empty(&rcu_torture_freelist)) { 131 if (list_empty(&rcu_torture_freelist)) {
132 atomic_inc(&n_rcu_torture_alloc_fail); 132 atomic_inc(&n_rcu_torture_alloc_fail);
133 spin_unlock_bh(&rcu_torture_lock); 133 spin_unlock_bh(&rcu_torture_lock);
134 return NULL; 134 return NULL;
135 } 135 }
136 atomic_inc(&n_rcu_torture_alloc); 136 atomic_inc(&n_rcu_torture_alloc);
137 p = rcu_torture_freelist.next; 137 p = rcu_torture_freelist.next;
138 list_del_init(p); 138 list_del_init(p);
139 spin_unlock_bh(&rcu_torture_lock); 139 spin_unlock_bh(&rcu_torture_lock);
140 return container_of(p, struct rcu_torture, rtort_free); 140 return container_of(p, struct rcu_torture, rtort_free);
141 } 141 }
142 142
143 /* 143 /*
144 * Free an element to the rcu_tortures pool. 144 * Free an element to the rcu_tortures pool.
145 */ 145 */
146 static void 146 static void
147 rcu_torture_free(struct rcu_torture *p) 147 rcu_torture_free(struct rcu_torture *p)
148 { 148 {
149 atomic_inc(&n_rcu_torture_free); 149 atomic_inc(&n_rcu_torture_free);
150 spin_lock_bh(&rcu_torture_lock); 150 spin_lock_bh(&rcu_torture_lock);
151 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 151 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
152 spin_unlock_bh(&rcu_torture_lock); 152 spin_unlock_bh(&rcu_torture_lock);
153 } 153 }
154 154
155 struct rcu_random_state { 155 struct rcu_random_state {
156 unsigned long rrs_state; 156 unsigned long rrs_state;
157 long rrs_count; 157 long rrs_count;
158 }; 158 };
159 159
160 #define RCU_RANDOM_MULT 39916801 /* prime */ 160 #define RCU_RANDOM_MULT 39916801 /* prime */
161 #define RCU_RANDOM_ADD 479001701 /* prime */ 161 #define RCU_RANDOM_ADD 479001701 /* prime */
162 #define RCU_RANDOM_REFRESH 10000 162 #define RCU_RANDOM_REFRESH 10000
163 163
164 #define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 } 164 #define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
165 165
166 /* 166 /*
167 * Crude but fast random-number generator. Uses a linear congruential 167 * Crude but fast random-number generator. Uses a linear congruential
168 * generator, with occasional help from get_random_bytes(). 168 * generator, with occasional help from get_random_bytes().
169 */ 169 */
170 static unsigned long 170 static unsigned long
171 rcu_random(struct rcu_random_state *rrsp) 171 rcu_random(struct rcu_random_state *rrsp)
172 { 172 {
173 long refresh; 173 long refresh;
174 174
175 if (--rrsp->rrs_count < 0) { 175 if (--rrsp->rrs_count < 0) {
176 get_random_bytes(&refresh, sizeof(refresh)); 176 get_random_bytes(&refresh, sizeof(refresh));
177 rrsp->rrs_state += refresh; 177 rrsp->rrs_state += refresh;
178 rrsp->rrs_count = RCU_RANDOM_REFRESH; 178 rrsp->rrs_count = RCU_RANDOM_REFRESH;
179 } 179 }
180 rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD; 180 rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
181 return swahw32(rrsp->rrs_state); 181 return swahw32(rrsp->rrs_state);
182 } 182 }
183 183
184 /* 184 /*
185 * Operations vector for selecting different types of tests. 185 * Operations vector for selecting different types of tests.
186 */ 186 */
187 187
188 struct rcu_torture_ops { 188 struct rcu_torture_ops {
189 void (*init)(void); 189 void (*init)(void);
190 void (*cleanup)(void); 190 void (*cleanup)(void);
191 int (*readlock)(void); 191 int (*readlock)(void);
192 void (*readdelay)(struct rcu_random_state *rrsp); 192 void (*readdelay)(struct rcu_random_state *rrsp);
193 void (*readunlock)(int idx); 193 void (*readunlock)(int idx);
194 int (*completed)(void); 194 int (*completed)(void);
195 void (*deferredfree)(struct rcu_torture *p); 195 void (*deferredfree)(struct rcu_torture *p);
196 void (*sync)(void); 196 void (*sync)(void);
197 int (*stats)(char *page); 197 int (*stats)(char *page);
198 char *name; 198 char *name;
199 }; 199 };
200 static struct rcu_torture_ops *cur_ops = NULL; 200 static struct rcu_torture_ops *cur_ops = NULL;
201 201
202 /* 202 /*
203 * Definitions for rcu torture testing. 203 * Definitions for rcu torture testing.
204 */ 204 */
205 205
206 static int rcu_torture_read_lock(void) __acquires(RCU) 206 static int rcu_torture_read_lock(void) __acquires(RCU)
207 { 207 {
208 rcu_read_lock(); 208 rcu_read_lock();
209 return 0; 209 return 0;
210 } 210 }
211 211
212 static void rcu_read_delay(struct rcu_random_state *rrsp) 212 static void rcu_read_delay(struct rcu_random_state *rrsp)
213 { 213 {
214 long delay; 214 long delay;
215 const long longdelay = 200; 215 const long longdelay = 200;
216 216
217 /* We want there to be long-running readers, but not all the time. */ 217 /* We want there to be long-running readers, but not all the time. */
218 218
219 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay); 219 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
220 if (!delay) 220 if (!delay)
221 udelay(longdelay); 221 udelay(longdelay);
222 } 222 }
223 223
224 static void rcu_torture_read_unlock(int idx) __releases(RCU) 224 static void rcu_torture_read_unlock(int idx) __releases(RCU)
225 { 225 {
226 rcu_read_unlock(); 226 rcu_read_unlock();
227 } 227 }
228 228
229 static int rcu_torture_completed(void) 229 static int rcu_torture_completed(void)
230 { 230 {
231 return rcu_batches_completed(); 231 return rcu_batches_completed();
232 } 232 }
233 233
234 static void 234 static void
235 rcu_torture_cb(struct rcu_head *p) 235 rcu_torture_cb(struct rcu_head *p)
236 { 236 {
237 int i; 237 int i;
238 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 238 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
239 239
240 if (fullstop) { 240 if (fullstop) {
241 /* Test is ending, just drop callbacks on the floor. */ 241 /* Test is ending, just drop callbacks on the floor. */
242 /* The next initialization will pick up the pieces. */ 242 /* The next initialization will pick up the pieces. */
243 return; 243 return;
244 } 244 }
245 i = rp->rtort_pipe_count; 245 i = rp->rtort_pipe_count;
246 if (i > RCU_TORTURE_PIPE_LEN) 246 if (i > RCU_TORTURE_PIPE_LEN)
247 i = RCU_TORTURE_PIPE_LEN; 247 i = RCU_TORTURE_PIPE_LEN;
248 atomic_inc(&rcu_torture_wcount[i]); 248 atomic_inc(&rcu_torture_wcount[i]);
249 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 249 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
250 rp->rtort_mbtest = 0; 250 rp->rtort_mbtest = 0;
251 rcu_torture_free(rp); 251 rcu_torture_free(rp);
252 } else 252 } else
253 cur_ops->deferredfree(rp); 253 cur_ops->deferredfree(rp);
254 } 254 }
255 255
256 static void rcu_torture_deferred_free(struct rcu_torture *p) 256 static void rcu_torture_deferred_free(struct rcu_torture *p)
257 { 257 {
258 call_rcu(&p->rtort_rcu, rcu_torture_cb); 258 call_rcu(&p->rtort_rcu, rcu_torture_cb);
259 } 259 }
260 260
261 static struct rcu_torture_ops rcu_ops = { 261 static struct rcu_torture_ops rcu_ops = {
262 .init = NULL, 262 .init = NULL,
263 .cleanup = NULL, 263 .cleanup = NULL,
264 .readlock = rcu_torture_read_lock, 264 .readlock = rcu_torture_read_lock,
265 .readdelay = rcu_read_delay, 265 .readdelay = rcu_read_delay,
266 .readunlock = rcu_torture_read_unlock, 266 .readunlock = rcu_torture_read_unlock,
267 .completed = rcu_torture_completed, 267 .completed = rcu_torture_completed,
268 .deferredfree = rcu_torture_deferred_free, 268 .deferredfree = rcu_torture_deferred_free,
269 .sync = synchronize_rcu, 269 .sync = synchronize_rcu,
270 .stats = NULL, 270 .stats = NULL,
271 .name = "rcu" 271 .name = "rcu"
272 }; 272 };
273 273
274 static void rcu_sync_torture_deferred_free(struct rcu_torture *p) 274 static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
275 { 275 {
276 int i; 276 int i;
277 struct rcu_torture *rp; 277 struct rcu_torture *rp;
278 struct rcu_torture *rp1; 278 struct rcu_torture *rp1;
279 279
280 cur_ops->sync(); 280 cur_ops->sync();
281 list_add(&p->rtort_free, &rcu_torture_removed); 281 list_add(&p->rtort_free, &rcu_torture_removed);
282 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 282 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
283 i = rp->rtort_pipe_count; 283 i = rp->rtort_pipe_count;
284 if (i > RCU_TORTURE_PIPE_LEN) 284 if (i > RCU_TORTURE_PIPE_LEN)
285 i = RCU_TORTURE_PIPE_LEN; 285 i = RCU_TORTURE_PIPE_LEN;
286 atomic_inc(&rcu_torture_wcount[i]); 286 atomic_inc(&rcu_torture_wcount[i]);
287 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 287 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
288 rp->rtort_mbtest = 0; 288 rp->rtort_mbtest = 0;
289 list_del(&rp->rtort_free); 289 list_del(&rp->rtort_free);
290 rcu_torture_free(rp); 290 rcu_torture_free(rp);
291 } 291 }
292 } 292 }
293 } 293 }
294 294
295 static void rcu_sync_torture_init(void) 295 static void rcu_sync_torture_init(void)
296 { 296 {
297 INIT_LIST_HEAD(&rcu_torture_removed); 297 INIT_LIST_HEAD(&rcu_torture_removed);
298 } 298 }
299 299
300 static struct rcu_torture_ops rcu_sync_ops = { 300 static struct rcu_torture_ops rcu_sync_ops = {
301 .init = rcu_sync_torture_init, 301 .init = rcu_sync_torture_init,
302 .cleanup = NULL, 302 .cleanup = NULL,
303 .readlock = rcu_torture_read_lock, 303 .readlock = rcu_torture_read_lock,
304 .readdelay = rcu_read_delay, 304 .readdelay = rcu_read_delay,
305 .readunlock = rcu_torture_read_unlock, 305 .readunlock = rcu_torture_read_unlock,
306 .completed = rcu_torture_completed, 306 .completed = rcu_torture_completed,
307 .deferredfree = rcu_sync_torture_deferred_free, 307 .deferredfree = rcu_sync_torture_deferred_free,
308 .sync = synchronize_rcu, 308 .sync = synchronize_rcu,
309 .stats = NULL, 309 .stats = NULL,
310 .name = "rcu_sync" 310 .name = "rcu_sync"
311 }; 311 };
312 312
313 /* 313 /*
314 * Definitions for rcu_bh torture testing. 314 * Definitions for rcu_bh torture testing.
315 */ 315 */
316 316
317 static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH) 317 static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
318 { 318 {
319 rcu_read_lock_bh(); 319 rcu_read_lock_bh();
320 return 0; 320 return 0;
321 } 321 }
322 322
323 static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH) 323 static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
324 { 324 {
325 rcu_read_unlock_bh(); 325 rcu_read_unlock_bh();
326 } 326 }
327 327
328 static int rcu_bh_torture_completed(void) 328 static int rcu_bh_torture_completed(void)
329 { 329 {
330 return rcu_batches_completed_bh(); 330 return rcu_batches_completed_bh();
331 } 331 }
332 332
333 static void rcu_bh_torture_deferred_free(struct rcu_torture *p) 333 static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
334 { 334 {
335 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); 335 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
336 } 336 }
337 337
338 struct rcu_bh_torture_synchronize { 338 struct rcu_bh_torture_synchronize {
339 struct rcu_head head; 339 struct rcu_head head;
340 struct completion completion; 340 struct completion completion;
341 }; 341 };
342 342
343 static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head) 343 static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
344 { 344 {
345 struct rcu_bh_torture_synchronize *rcu; 345 struct rcu_bh_torture_synchronize *rcu;
346 346
347 rcu = container_of(head, struct rcu_bh_torture_synchronize, head); 347 rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
348 complete(&rcu->completion); 348 complete(&rcu->completion);
349 } 349 }
350 350
351 static void rcu_bh_torture_synchronize(void) 351 static void rcu_bh_torture_synchronize(void)
352 { 352 {
353 struct rcu_bh_torture_synchronize rcu; 353 struct rcu_bh_torture_synchronize rcu;
354 354
355 init_completion(&rcu.completion); 355 init_completion(&rcu.completion);
356 call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb); 356 call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
357 wait_for_completion(&rcu.completion); 357 wait_for_completion(&rcu.completion);
358 } 358 }
359 359
360 static struct rcu_torture_ops rcu_bh_ops = { 360 static struct rcu_torture_ops rcu_bh_ops = {
361 .init = NULL, 361 .init = NULL,
362 .cleanup = NULL, 362 .cleanup = NULL,
363 .readlock = rcu_bh_torture_read_lock, 363 .readlock = rcu_bh_torture_read_lock,
364 .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 364 .readdelay = rcu_read_delay, /* just reuse rcu's version. */
365 .readunlock = rcu_bh_torture_read_unlock, 365 .readunlock = rcu_bh_torture_read_unlock,
366 .completed = rcu_bh_torture_completed, 366 .completed = rcu_bh_torture_completed,
367 .deferredfree = rcu_bh_torture_deferred_free, 367 .deferredfree = rcu_bh_torture_deferred_free,
368 .sync = rcu_bh_torture_synchronize, 368 .sync = rcu_bh_torture_synchronize,
369 .stats = NULL, 369 .stats = NULL,
370 .name = "rcu_bh" 370 .name = "rcu_bh"
371 }; 371 };
372 372
373 static struct rcu_torture_ops rcu_bh_sync_ops = { 373 static struct rcu_torture_ops rcu_bh_sync_ops = {
374 .init = rcu_sync_torture_init, 374 .init = rcu_sync_torture_init,
375 .cleanup = NULL, 375 .cleanup = NULL,
376 .readlock = rcu_bh_torture_read_lock, 376 .readlock = rcu_bh_torture_read_lock,
377 .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 377 .readdelay = rcu_read_delay, /* just reuse rcu's version. */
378 .readunlock = rcu_bh_torture_read_unlock, 378 .readunlock = rcu_bh_torture_read_unlock,
379 .completed = rcu_bh_torture_completed, 379 .completed = rcu_bh_torture_completed,
380 .deferredfree = rcu_sync_torture_deferred_free, 380 .deferredfree = rcu_sync_torture_deferred_free,
381 .sync = rcu_bh_torture_synchronize, 381 .sync = rcu_bh_torture_synchronize,
382 .stats = NULL, 382 .stats = NULL,
383 .name = "rcu_bh_sync" 383 .name = "rcu_bh_sync"
384 }; 384 };
385 385
386 /* 386 /*
387 * Definitions for srcu torture testing. 387 * Definitions for srcu torture testing.
388 */ 388 */
389 389
390 static struct srcu_struct srcu_ctl; 390 static struct srcu_struct srcu_ctl;
391 391
392 static void srcu_torture_init(void) 392 static void srcu_torture_init(void)
393 { 393 {
394 init_srcu_struct(&srcu_ctl); 394 init_srcu_struct(&srcu_ctl);
395 rcu_sync_torture_init(); 395 rcu_sync_torture_init();
396 } 396 }
397 397
398 static void srcu_torture_cleanup(void) 398 static void srcu_torture_cleanup(void)
399 { 399 {
400 synchronize_srcu(&srcu_ctl); 400 synchronize_srcu(&srcu_ctl);
401 cleanup_srcu_struct(&srcu_ctl); 401 cleanup_srcu_struct(&srcu_ctl);
402 } 402 }
403 403
404 static int srcu_torture_read_lock(void) __acquires(&srcu_ctl) 404 static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
405 { 405 {
406 return srcu_read_lock(&srcu_ctl); 406 return srcu_read_lock(&srcu_ctl);
407 } 407 }
408 408
409 static void srcu_read_delay(struct rcu_random_state *rrsp) 409 static void srcu_read_delay(struct rcu_random_state *rrsp)
410 { 410 {
411 long delay; 411 long delay;
412 const long uspertick = 1000000 / HZ; 412 const long uspertick = 1000000 / HZ;
413 const long longdelay = 10; 413 const long longdelay = 10;
414 414
415 /* We want there to be long-running readers, but not all the time. */ 415 /* We want there to be long-running readers, but not all the time. */
416 416
417 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); 417 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
418 if (!delay) 418 if (!delay)
419 schedule_timeout_interruptible(longdelay); 419 schedule_timeout_interruptible(longdelay);
420 } 420 }
421 421
422 static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) 422 static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
423 { 423 {
424 srcu_read_unlock(&srcu_ctl, idx); 424 srcu_read_unlock(&srcu_ctl, idx);
425 } 425 }
426 426
427 static int srcu_torture_completed(void) 427 static int srcu_torture_completed(void)
428 { 428 {
429 return srcu_batches_completed(&srcu_ctl); 429 return srcu_batches_completed(&srcu_ctl);
430 } 430 }
431 431
432 static void srcu_torture_synchronize(void) 432 static void srcu_torture_synchronize(void)
433 { 433 {
434 synchronize_srcu(&srcu_ctl); 434 synchronize_srcu(&srcu_ctl);
435 } 435 }
436 436
437 static int srcu_torture_stats(char *page) 437 static int srcu_torture_stats(char *page)
438 { 438 {
439 int cnt = 0; 439 int cnt = 0;
440 int cpu; 440 int cpu;
441 int idx = srcu_ctl.completed & 0x1; 441 int idx = srcu_ctl.completed & 0x1;
442 442
443 cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):", 443 cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
444 torture_type, TORTURE_FLAG, idx); 444 torture_type, TORTURE_FLAG, idx);
445 for_each_possible_cpu(cpu) { 445 for_each_possible_cpu(cpu) {
446 cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu, 446 cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
447 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx], 447 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
448 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]); 448 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
449 } 449 }
450 cnt += sprintf(&page[cnt], "\n"); 450 cnt += sprintf(&page[cnt], "\n");
451 return cnt; 451 return cnt;
452 } 452 }
453 453
454 static struct rcu_torture_ops srcu_ops = { 454 static struct rcu_torture_ops srcu_ops = {
455 .init = srcu_torture_init, 455 .init = srcu_torture_init,
456 .cleanup = srcu_torture_cleanup, 456 .cleanup = srcu_torture_cleanup,
457 .readlock = srcu_torture_read_lock, 457 .readlock = srcu_torture_read_lock,
458 .readdelay = srcu_read_delay, 458 .readdelay = srcu_read_delay,
459 .readunlock = srcu_torture_read_unlock, 459 .readunlock = srcu_torture_read_unlock,
460 .completed = srcu_torture_completed, 460 .completed = srcu_torture_completed,
461 .deferredfree = rcu_sync_torture_deferred_free, 461 .deferredfree = rcu_sync_torture_deferred_free,
462 .sync = srcu_torture_synchronize, 462 .sync = srcu_torture_synchronize,
463 .stats = srcu_torture_stats, 463 .stats = srcu_torture_stats,
464 .name = "srcu" 464 .name = "srcu"
465 }; 465 };
466 466
467 /* 467 /*
468 * Definitions for sched torture testing. 468 * Definitions for sched torture testing.
469 */ 469 */
470 470
471 static int sched_torture_read_lock(void) 471 static int sched_torture_read_lock(void)
472 { 472 {
473 preempt_disable(); 473 preempt_disable();
474 return 0; 474 return 0;
475 } 475 }
476 476
477 static void sched_torture_read_unlock(int idx) 477 static void sched_torture_read_unlock(int idx)
478 { 478 {
479 preempt_enable(); 479 preempt_enable();
480 } 480 }
481 481
482 static int sched_torture_completed(void) 482 static int sched_torture_completed(void)
483 { 483 {
484 return 0; 484 return 0;
485 } 485 }
486 486
487 static void sched_torture_synchronize(void) 487 static void sched_torture_synchronize(void)
488 { 488 {
489 synchronize_sched(); 489 synchronize_sched();
490 } 490 }
491 491
492 static struct rcu_torture_ops sched_ops = { 492 static struct rcu_torture_ops sched_ops = {
493 .init = rcu_sync_torture_init, 493 .init = rcu_sync_torture_init,
494 .cleanup = NULL, 494 .cleanup = NULL,
495 .readlock = sched_torture_read_lock, 495 .readlock = sched_torture_read_lock,
496 .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 496 .readdelay = rcu_read_delay, /* just reuse rcu's version. */
497 .readunlock = sched_torture_read_unlock, 497 .readunlock = sched_torture_read_unlock,
498 .completed = sched_torture_completed, 498 .completed = sched_torture_completed,
499 .deferredfree = rcu_sync_torture_deferred_free, 499 .deferredfree = rcu_sync_torture_deferred_free,
500 .sync = sched_torture_synchronize, 500 .sync = sched_torture_synchronize,
501 .stats = NULL, 501 .stats = NULL,
502 .name = "sched" 502 .name = "sched"
503 }; 503 };
504 504
505 static struct rcu_torture_ops *torture_ops[] = 505 static struct rcu_torture_ops *torture_ops[] =
506 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, &srcu_ops, 506 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, &srcu_ops,
507 &sched_ops, NULL }; 507 &sched_ops, NULL };
508 508
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);
	/* Keep this kthread out of the freezer so it runs across suspend. */
	current->flags |= PF_NOFREEZE;

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;	/* free list empty; retry next pass. */
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);	/* randomize update timing. */
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;	/* mark live so readers can detect premature free. */
		/* Publish the new structure to readers. */
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb();
		if (old_rp != NULL) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;	/* clamp for histogram. */
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			/* Defer freeing until readers can no longer hold old_rp. */
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	/* Spin until kthread_stop() is actually invoked on us. */
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
553 553
554 /* 554 /*
555 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 555 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
556 * delay between calls. 556 * delay between calls.
557 */ 557 */
558 static int 558 static int
559 rcu_torture_fakewriter(void *arg) 559 rcu_torture_fakewriter(void *arg)
560 { 560 {
561 DEFINE_RCU_RANDOM(rand); 561 DEFINE_RCU_RANDOM(rand);
562 562
563 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started"); 563 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
564 set_user_nice(current, 19); 564 set_user_nice(current, 19);
565 current->flags |= PF_NOFREEZE; 565 current->flags |= PF_NOFREEZE;
566 566
567 do { 567 do {
568 schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); 568 schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
569 udelay(rcu_random(&rand) & 0x3ff); 569 udelay(rcu_random(&rand) & 0x3ff);
570 cur_ops->sync(); 570 cur_ops->sync();
571 } while (!kthread_should_stop() && !fullstop); 571 } while (!kthread_should_stop() && !fullstop);
572 572
573 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); 573 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
574 while (!kthread_should_stop()) 574 while (!kthread_should_stop())
575 schedule_timeout_uninterruptible(1); 575 schedule_timeout_uninterruptible(1);
576 return 0; 576 return 0;
577 } 577 }
578 578
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	/* Keep this kthread out of the freezer so it runs across suspend. */
	current->flags |= PF_NOFREEZE;

	do {
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			/* Structure was freed while we held a read lock: bug. */
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);	/* randomize read-side critical-section length. */
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		/* Per-CPU counter updates; preemption is disabled around them. */
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	/* Spin until kthread_stop() is actually invoked on us. */
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
633 633
/*
 * Create an RCU-torture statistics message in the specified buffer.
 * Returns the number of characters written.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	/* Sum the per-CPU pipeline and batch histograms. */
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	/* Find the highest-numbered nonzero pipeline bucket. */
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror));
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");	/* flag memory-barrier errors. */
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		/* Entries beyond pipeline stage 1 indicate an RCU failure. */
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats != NULL)
		cnt += cur_ops->stats(&page[cnt]);	/* append flavor-specific stats. */
	return cnt;
}
692 692
693 /* 693 /*
694 * Print torture statistics. Caller must ensure that there is only 694 * Print torture statistics. Caller must ensure that there is only
695 * one call to this function at a given time!!! This is normally 695 * one call to this function at a given time!!! This is normally
696 * accomplished by relying on the module system to only have one copy 696 * accomplished by relying on the module system to only have one copy
697 * of the module loaded, and then by giving the rcu_torture_stats 697 * of the module loaded, and then by giving the rcu_torture_stats
698 * kthread full control (or the init/cleanup functions when rcu_torture_stats 698 * kthread full control (or the init/cleanup functions when rcu_torture_stats
699 * thread is not running). 699 * thread is not running).
700 */ 700 */
701 static void 701 static void
702 rcu_torture_stats_print(void) 702 rcu_torture_stats_print(void)
703 { 703 {
704 int cnt; 704 int cnt;
705 705
706 cnt = rcu_torture_printk(printk_buf); 706 cnt = rcu_torture_printk(printk_buf);
707 printk(KERN_ALERT "%s", printk_buf); 707 printk(KERN_ALERT "%s", printk_buf);
708 } 708 }
709 709
710 /* 710 /*
711 * Periodically prints torture statistics, if periodic statistics printing 711 * Periodically prints torture statistics, if periodic statistics printing
712 * was specified via the stat_interval module parameter. 712 * was specified via the stat_interval module parameter.
713 * 713 *
714 * No need to worry about fullstop here, since this one doesn't reference 714 * No need to worry about fullstop here, since this one doesn't reference
715 * volatile state or register callbacks. 715 * volatile state or register callbacks.
716 */ 716 */
717 static int 717 static int
718 rcu_torture_stats(void *arg) 718 rcu_torture_stats(void *arg)
719 { 719 {
720 VERBOSE_PRINTK_STRING("rcu_torture_stats task started"); 720 VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
721 do { 721 do {
722 schedule_timeout_interruptible(stat_interval * HZ); 722 schedule_timeout_interruptible(stat_interval * HZ);
723 rcu_torture_stats_print(); 723 rcu_torture_stats_print();
724 } while (!kthread_should_stop()); 724 } while (!kthread_should_stop());
725 VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping"); 725 VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
726 return 0; 726 return 0;
727 } 727 }
728 728
/* CPU currently being allowed to go idle by rcu_torture_shuffle_tasks();
 * -1 means all CPUs may run torture tasks. */
static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */
730 730
/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	cpumask_t tmp_mask = CPU_MASK_ALL;
	int i;

	/* Hold off CPU hotplug while we rebind the torture tasks. */
	lock_cpu_hotplug();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		unlock_cpu_hotplug();
		return;
	}

	/* Exclude the to-be-idled CPU from the allowed mask. */
	if (rcu_idle_cpu != -1)
		cpu_clear(rcu_idle_cpu, tmp_mask);

	/* Rebind ourself and every torture kthread that exists. */
	set_cpus_allowed(current, tmp_mask);

	if (reader_tasks != NULL) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed(reader_tasks[i], tmp_mask);
	}

	if (fakewriter_tasks != NULL) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed(writer_task, tmp_mask);

	if (stats_task)
		set_cpus_allowed(stats_task, tmp_mask);

	/* Advance rcu_idle_cpu: cycle down through the CPUs, then -1 (none). */
	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	unlock_cpu_hotplug();
}
777 777
778 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the 778 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
779 * system to become idle at a time and cut off its timer ticks. This is meant 779 * system to become idle at a time and cut off its timer ticks. This is meant
780 * to test the support for such tickless idle CPU in RCU. 780 * to test the support for such tickless idle CPU in RCU.
781 */ 781 */
782 static int 782 static int
783 rcu_torture_shuffle(void *arg) 783 rcu_torture_shuffle(void *arg)
784 { 784 {
785 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started"); 785 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
786 do { 786 do {
787 schedule_timeout_interruptible(shuffle_interval * HZ); 787 schedule_timeout_interruptible(shuffle_interval * HZ);
788 rcu_torture_shuffle_tasks(); 788 rcu_torture_shuffle_tasks();
789 } while (!kthread_should_stop()); 789 } while (!kthread_should_stop());
790 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping"); 790 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
791 return 0; 791 return 0;
792 } 792 }
793 793
794 static inline void 794 static inline void
795 rcu_torture_print_module_parms(char *tag) 795 rcu_torture_print_module_parms(char *tag)
796 { 796 {
797 printk(KERN_ALERT "%s" TORTURE_FLAG 797 printk(KERN_ALERT "%s" TORTURE_FLAG
798 "--- %s: nreaders=%d nfakewriters=%d " 798 "--- %s: nreaders=%d nfakewriters=%d "
799 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 799 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
800 "shuffle_interval = %d\n", 800 "shuffle_interval = %d\n",
801 torture_type, tag, nrealreaders, nfakewriters, 801 torture_type, tag, nrealreaders, nfakewriters,
802 stat_interval, verbose, test_no_idle_hz, shuffle_interval); 802 stat_interval, verbose, test_no_idle_hz, shuffle_interval);
803 } 803 }
804 804
/*
 * Module cleanup: stop every torture kthread, wait for in-flight RCU
 * callbacks, print the final statistics, and report SUCCESS or FAILURE.
 * Also invoked by rcu_torture_init() to unwind a partial initialization,
 * so every task pointer is checked for NULL before being stopped.
 */
static void
rcu_torture_cleanup(void)
{
	int i;

	/* Tell all torture kthread loops to terminate. */
	fullstop = 1;
	if (shuffler_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks != NULL) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i] != NULL) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	/* Safe only now that all readers have been stopped. */
	rcu_torture_current = NULL;

	if (fakewriter_tasks != NULL) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i] != NULL) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */
	rcu_barrier();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}
868 868
869 static int 869 static int
870 rcu_torture_init(void) 870 rcu_torture_init(void)
871 { 871 {
872 int i; 872 int i;
873 int cpu; 873 int cpu;
874 int firsterr = 0; 874 int firsterr = 0;
875 875
876 /* Process args and tell the world that the torturer is on the job. */ 876 /* Process args and tell the world that the torturer is on the job. */
877 877
878 for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) { 878 for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) {
879 cur_ops = torture_ops[i]; 879 cur_ops = torture_ops[i];
880 if (strcmp(torture_type, cur_ops->name) == 0) { 880 if (strcmp(torture_type, cur_ops->name) == 0) {
881 break; 881 break;
882 } 882 }
883 } 883 }
884 if (cur_ops == NULL) { 884 if (cur_ops == NULL) {
885 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", 885 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
886 torture_type); 886 torture_type);
887 return (-EINVAL); 887 return (-EINVAL);
888 } 888 }
889 if (cur_ops->init != NULL) 889 if (cur_ops->init != NULL)
890 cur_ops->init(); /* no "goto unwind" prior to this point!!! */ 890 cur_ops->init(); /* no "goto unwind" prior to this point!!! */
891 891
892 if (nreaders >= 0) 892 if (nreaders >= 0)
893 nrealreaders = nreaders; 893 nrealreaders = nreaders;
894 else 894 else
895 nrealreaders = 2 * num_online_cpus(); 895 nrealreaders = 2 * num_online_cpus();
896 rcu_torture_print_module_parms("Start of test"); 896 rcu_torture_print_module_parms("Start of test");
897 fullstop = 0; 897 fullstop = 0;
898 898
899 /* Set up the freelist. */ 899 /* Set up the freelist. */
900 900
901 INIT_LIST_HEAD(&rcu_torture_freelist); 901 INIT_LIST_HEAD(&rcu_torture_freelist);
902 for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) { 902 for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) {
903 rcu_tortures[i].rtort_mbtest = 0; 903 rcu_tortures[i].rtort_mbtest = 0;
904 list_add_tail(&rcu_tortures[i].rtort_free, 904 list_add_tail(&rcu_tortures[i].rtort_free,
905 &rcu_torture_freelist); 905 &rcu_torture_freelist);
906 } 906 }
907 907
908 /* Initialize the statistics so that each run gets its own numbers. */ 908 /* Initialize the statistics so that each run gets its own numbers. */
909 909
910 rcu_torture_current = NULL; 910 rcu_torture_current = NULL;
911 rcu_torture_current_version = 0; 911 rcu_torture_current_version = 0;
912 atomic_set(&n_rcu_torture_alloc, 0); 912 atomic_set(&n_rcu_torture_alloc, 0);
913 atomic_set(&n_rcu_torture_alloc_fail, 0); 913 atomic_set(&n_rcu_torture_alloc_fail, 0);
914 atomic_set(&n_rcu_torture_free, 0); 914 atomic_set(&n_rcu_torture_free, 0);
915 atomic_set(&n_rcu_torture_mberror, 0); 915 atomic_set(&n_rcu_torture_mberror, 0);
916 atomic_set(&n_rcu_torture_error, 0); 916 atomic_set(&n_rcu_torture_error, 0);
917 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 917 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
918 atomic_set(&rcu_torture_wcount[i], 0); 918 atomic_set(&rcu_torture_wcount[i], 0);
919 for_each_possible_cpu(cpu) { 919 for_each_possible_cpu(cpu) {
920 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 920 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
921 per_cpu(rcu_torture_count, cpu)[i] = 0; 921 per_cpu(rcu_torture_count, cpu)[i] = 0;
922 per_cpu(rcu_torture_batch, cpu)[i] = 0; 922 per_cpu(rcu_torture_batch, cpu)[i] = 0;
923 } 923 }
924 } 924 }
925 925
926 /* Start up the kthreads. */ 926 /* Start up the kthreads. */
927 927
928 VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task"); 928 VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
929 writer_task = kthread_run(rcu_torture_writer, NULL, 929 writer_task = kthread_run(rcu_torture_writer, NULL,
930 "rcu_torture_writer"); 930 "rcu_torture_writer");
931 if (IS_ERR(writer_task)) { 931 if (IS_ERR(writer_task)) {
932 firsterr = PTR_ERR(writer_task); 932 firsterr = PTR_ERR(writer_task);
933 VERBOSE_PRINTK_ERRSTRING("Failed to create writer"); 933 VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
934 writer_task = NULL; 934 writer_task = NULL;
935 goto unwind; 935 goto unwind;
936 } 936 }
937 fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), 937 fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
938 GFP_KERNEL); 938 GFP_KERNEL);
939 if (fakewriter_tasks == NULL) { 939 if (fakewriter_tasks == NULL) {
940 VERBOSE_PRINTK_ERRSTRING("out of memory"); 940 VERBOSE_PRINTK_ERRSTRING("out of memory");
941 firsterr = -ENOMEM; 941 firsterr = -ENOMEM;
942 goto unwind; 942 goto unwind;
943 } 943 }
944 for (i = 0; i < nfakewriters; i++) { 944 for (i = 0; i < nfakewriters; i++) {
945 VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task"); 945 VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
946 fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL, 946 fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
947 "rcu_torture_fakewriter"); 947 "rcu_torture_fakewriter");
948 if (IS_ERR(fakewriter_tasks[i])) { 948 if (IS_ERR(fakewriter_tasks[i])) {
949 firsterr = PTR_ERR(fakewriter_tasks[i]); 949 firsterr = PTR_ERR(fakewriter_tasks[i]);
950 VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter"); 950 VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
951 fakewriter_tasks[i] = NULL; 951 fakewriter_tasks[i] = NULL;
952 goto unwind; 952 goto unwind;
953 } 953 }
954 } 954 }
955 reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]), 955 reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
956 GFP_KERNEL); 956 GFP_KERNEL);
957 if (reader_tasks == NULL) { 957 if (reader_tasks == NULL) {
958 VERBOSE_PRINTK_ERRSTRING("out of memory"); 958 VERBOSE_PRINTK_ERRSTRING("out of memory");
959 firsterr = -ENOMEM; 959 firsterr = -ENOMEM;
960 goto unwind; 960 goto unwind;
961 } 961 }
962 for (i = 0; i < nrealreaders; i++) { 962 for (i = 0; i < nrealreaders; i++) {
963 VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task"); 963 VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
964 reader_tasks[i] = kthread_run(rcu_torture_reader, NULL, 964 reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
965 "rcu_torture_reader"); 965 "rcu_torture_reader");
966 if (IS_ERR(reader_tasks[i])) { 966 if (IS_ERR(reader_tasks[i])) {
967 firsterr = PTR_ERR(reader_tasks[i]); 967 firsterr = PTR_ERR(reader_tasks[i]);
968 VERBOSE_PRINTK_ERRSTRING("Failed to create reader"); 968 VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
969 reader_tasks[i] = NULL; 969 reader_tasks[i] = NULL;
970 goto unwind; 970 goto unwind;
971 } 971 }
972 } 972 }
973 if (stat_interval > 0) { 973 if (stat_interval > 0) {
974 VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task"); 974 VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
975 stats_task = kthread_run(rcu_torture_stats, NULL, 975 stats_task = kthread_run(rcu_torture_stats, NULL,
976 "rcu_torture_stats"); 976 "rcu_torture_stats");
977 if (IS_ERR(stats_task)) { 977 if (IS_ERR(stats_task)) {
978 firsterr = PTR_ERR(stats_task); 978 firsterr = PTR_ERR(stats_task);
979 VERBOSE_PRINTK_ERRSTRING("Failed to create stats"); 979 VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
980 stats_task = NULL; 980 stats_task = NULL;
981 goto unwind; 981 goto unwind;
982 } 982 }
983 } 983 }
984 if (test_no_idle_hz) { 984 if (test_no_idle_hz) {
985 rcu_idle_cpu = num_online_cpus() - 1; 985 rcu_idle_cpu = num_online_cpus() - 1;
986 /* Create the shuffler thread */ 986 /* Create the shuffler thread */
987 shuffler_task = kthread_run(rcu_torture_shuffle, NULL, 987 shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
988 "rcu_torture_shuffle"); 988 "rcu_torture_shuffle");
989 if (IS_ERR(shuffler_task)) { 989 if (IS_ERR(shuffler_task)) {
990 firsterr = PTR_ERR(shuffler_task); 990 firsterr = PTR_ERR(shuffler_task);
991 VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); 991 VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
992 shuffler_task = NULL; 992 shuffler_task = NULL;
993 goto unwind; 993 goto unwind;
994 } 994 }
995 } 995 }
996 return 0; 996 return 0;
997 997
998 unwind: 998 unwind:
999 rcu_torture_cleanup(); 999 rcu_torture_cleanup();
1000 return firsterr; 1000 return firsterr;
1001 } 1001 }
1002 1002
/* Register module load/unload entry points. */
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
1005 1005