Commit bf66f18e79e34c421bbd8f6511e2c556b779df2f

Authored by Paul E. McKenney
Committed by Ingo Molnar
1 parent 46a1e34eda

rcu: Add force_quiescent_state() testing to rcutorture

Add force_quiescent_state() testing to rcutorture, with a
separate thread that repeatedly invokes force_quiescent_state()
in bursts. This can greatly increase the probability of
encountering certain types of race conditions.
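
The burst behavior is controlled by three new module parameters:
fqs_duration (burst length in microseconds, 0 disables the thread),
fqs_holdoff (delay between calls within a burst, in microseconds),
and fqs_stutter (wait between bursts, in seconds).  For example,
with illustrative settings such as

    modprobe rcutorture fqs_duration=100 fqs_holdoff=10 fqs_stutter=3

each burst would issue roughly 100/10 = 10 force_quiescent_state()
calls spaced 10 microseconds apart, with bursts recurring roughly
every 3 seconds, subject to the test's global stutter behavior.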

Suggested-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1262646551116-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 5 changed files with 130 additions and 2 deletions

include/linux/rcutiny.h
... ... @@ -62,6 +62,18 @@
62 62  
63 63 extern int rcu_expedited_torture_stats(char *page);
64 64  
  65 +static inline void rcu_force_quiescent_state(void)
  66 +{
  67 +}
  68 +
  69 +static inline void rcu_bh_force_quiescent_state(void)
  70 +{
  71 +}
  72 +
  73 +static inline void rcu_sched_force_quiescent_state(void)
  74 +{
  75 +}
  76 +
65 77 #define synchronize_rcu synchronize_sched
66 78  
67 79 static inline void synchronize_rcu_expedited(void)
include/linux/rcutree.h
... ... @@ -99,6 +99,9 @@
99 99 extern long rcu_batches_completed(void);
100 100 extern long rcu_batches_completed_bh(void);
101 101 extern long rcu_batches_completed_sched(void);
  102 +extern void rcu_force_quiescent_state(void);
  103 +extern void rcu_bh_force_quiescent_state(void);
  104 +extern void rcu_sched_force_quiescent_state(void);
102 105  
103 106 #ifdef CONFIG_NO_HZ
104 107 void rcu_enter_nohz(void);
kernel/rcutorture.c
... ... @@ -61,6 +61,9 @@
61 61 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
62 62 static int stutter = 5; /* Start/stop testing interval (in sec) */
63 63 static int irqreader = 1; /* RCU readers from irq (timers). */
  64 +static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */
  65 +static int fqs_holdoff = 0; /* Hold time within burst (us). */
  66 +static int fqs_stutter = 3; /* Wait time between bursts (s). */
64 67 static char *torture_type = "rcu"; /* What RCU implementation to torture. */
65 68  
66 69 module_param(nreaders, int, 0444);
... ... @@ -79,6 +82,12 @@
79 82 MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
80 83 module_param(irqreader, int, 0444);
81 84 MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
  85 +module_param(fqs_duration, int, 0444);
  86 +MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
  87 +module_param(fqs_holdoff, int, 0444);
  88 +MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
  89 +module_param(fqs_stutter, int, 0444);
  90 +MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
82 91 module_param(torture_type, charp, 0444);
83 92 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
84 93  
... ... @@ -99,6 +108,7 @@
99 108 static struct task_struct *stats_task;
100 109 static struct task_struct *shuffler_task;
101 110 static struct task_struct *stutter_task;
  111 +static struct task_struct *fqs_task;
102 112  
103 113 #define RCU_TORTURE_PIPE_LEN 10
104 114  
... ... @@ -263,6 +273,7 @@
263 273 void (*deferred_free)(struct rcu_torture *p);
264 274 void (*sync)(void);
265 275 void (*cb_barrier)(void);
  276 + void (*fqs)(void);
266 277 int (*stats)(char *page);
267 278 int irq_capable;
268 279 char *name;
... ... @@ -347,6 +358,7 @@
347 358 .deferred_free = rcu_torture_deferred_free,
348 359 .sync = synchronize_rcu,
349 360 .cb_barrier = rcu_barrier,
  361 + .fqs = rcu_force_quiescent_state,
350 362 .stats = NULL,
351 363 .irq_capable = 1,
352 364 .name = "rcu"
... ... @@ -388,6 +400,7 @@
388 400 .deferred_free = rcu_sync_torture_deferred_free,
389 401 .sync = synchronize_rcu,
390 402 .cb_barrier = NULL,
  403 + .fqs = rcu_force_quiescent_state,
391 404 .stats = NULL,
392 405 .irq_capable = 1,
393 406 .name = "rcu_sync"
... ... @@ -403,6 +416,7 @@
403 416 .deferred_free = rcu_sync_torture_deferred_free,
404 417 .sync = synchronize_rcu_expedited,
405 418 .cb_barrier = NULL,
  419 + .fqs = rcu_force_quiescent_state,
406 420 .stats = NULL,
407 421 .irq_capable = 1,
408 422 .name = "rcu_expedited"
... ... @@ -465,6 +479,7 @@
465 479 .deferred_free = rcu_bh_torture_deferred_free,
466 480 .sync = rcu_bh_torture_synchronize,
467 481 .cb_barrier = rcu_barrier_bh,
  482 + .fqs = rcu_bh_force_quiescent_state,
468 483 .stats = NULL,
469 484 .irq_capable = 1,
470 485 .name = "rcu_bh"
... ... @@ -480,6 +495,7 @@
480 495 .deferred_free = rcu_sync_torture_deferred_free,
481 496 .sync = rcu_bh_torture_synchronize,
482 497 .cb_barrier = NULL,
  498 + .fqs = rcu_bh_force_quiescent_state,
483 499 .stats = NULL,
484 500 .irq_capable = 1,
485 501 .name = "rcu_bh_sync"
... ... @@ -621,6 +637,7 @@
621 637 .deferred_free = rcu_sched_torture_deferred_free,
622 638 .sync = sched_torture_synchronize,
623 639 .cb_barrier = rcu_barrier_sched,
  640 + .fqs = rcu_sched_force_quiescent_state,
624 641 .stats = NULL,
625 642 .irq_capable = 1,
626 643 .name = "sched"
... ... @@ -636,6 +653,7 @@
636 653 .deferred_free = rcu_sync_torture_deferred_free,
637 654 .sync = sched_torture_synchronize,
638 655 .cb_barrier = NULL,
  656 + .fqs = rcu_sched_force_quiescent_state,
639 657 .stats = NULL,
640 658 .name = "sched_sync"
641 659 };
642 660  
... ... @@ -650,12 +668,45 @@
650 668 .deferred_free = rcu_sync_torture_deferred_free,
651 669 .sync = synchronize_sched_expedited,
652 670 .cb_barrier = NULL,
  671 + .fqs = rcu_sched_force_quiescent_state,
653 672 .stats = rcu_expedited_torture_stats,
654 673 .irq_capable = 1,
655 674 .name = "sched_expedited"
656 675 };
657 676  
658 677 /*
  678 + * RCU torture force-quiescent-state kthread. Repeatedly induces
  679 + * bursts of calls to force_quiescent_state(), increasing the probability
  680 + * of occurrence of some important types of race conditions.
  681 + */
  682 +static int
  683 +rcu_torture_fqs(void *arg)
  684 +{
  685 + unsigned long fqs_resume_time;
  686 + int fqs_burst_remaining;
  687 +
  688 + VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
  689 + do {
  690 + fqs_resume_time = jiffies + fqs_stutter * HZ;
  691 + while (jiffies - fqs_resume_time > LONG_MAX) {
  692 + schedule_timeout_interruptible(1);
  693 + }
  694 + fqs_burst_remaining = fqs_duration;
  695 + while (fqs_burst_remaining > 0) {
  696 + cur_ops->fqs();
  697 + udelay(fqs_holdoff);
  698 + fqs_burst_remaining -= fqs_holdoff;
  699 + }
  700 + rcu_stutter_wait("rcu_torture_fqs");
  701 + } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
  702 + VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
  703 + rcutorture_shutdown_absorb("rcu_torture_fqs");
  704 + while (!kthread_should_stop())
  705 + schedule_timeout_uninterruptible(1);
  706 + return 0;
  707 +}
  708 +
  709 +/*
659 710 * RCU torture writer kthread. Repeatedly substitutes a new structure
660 711 * for that pointed to by rcu_torture_current, freeing the old structure
661 712 * after a series of grace periods (the "pipeline").
662 713  
... ... @@ -1030,10 +1081,11 @@
1030 1081 printk(KERN_ALERT "%s" TORTURE_FLAG
1031 1082 "--- %s: nreaders=%d nfakewriters=%d "
1032 1083 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1033   - "shuffle_interval=%d stutter=%d irqreader=%d\n",
  1084 + "shuffle_interval=%d stutter=%d irqreader=%d "
  1085 + "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n",
1034 1086 torture_type, tag, nrealreaders, nfakewriters,
1035 1087 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1036   - stutter, irqreader);
  1088 + stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter);
1037 1089 }
1038 1090  
1039 1091 static struct notifier_block rcutorture_nb = {
... ... @@ -1109,6 +1161,12 @@
1109 1161 }
1110 1162 stats_task = NULL;
1111 1163  
  1164 + if (fqs_task) {
  1165 + VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
  1166 + kthread_stop(fqs_task);
  1167 + }
  1168 + fqs_task = NULL;
  1169 +
1112 1170 /* Wait for all RCU callbacks to fire. */
1113 1171  
1114 1172 if (cur_ops->cb_barrier != NULL)
... ... @@ -1154,6 +1212,11 @@
1154 1212 mutex_unlock(&fullstop_mutex);
1155 1213 return -EINVAL;
1156 1214 }
  1215 + if (cur_ops->fqs == NULL && fqs_duration != 0) {
  1216 + printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
  1217 + "fqs_duration, fqs disabled.\n");
  1218 + fqs_duration = 0;
  1219 + }
1157 1220 if (cur_ops->init)
1158 1221 cur_ops->init(); /* no "goto unwind" prior to this point!!! */
1159 1222  
... ... @@ -1279,6 +1342,19 @@
1279 1342 firsterr = PTR_ERR(stutter_task);
1280 1343 VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
1281 1344 stutter_task = NULL;
  1345 + goto unwind;
  1346 + }
  1347 + }
  1348 + if (fqs_duration < 0)
  1349 + fqs_duration = 0;
  1350 + if (fqs_duration) {
  1351 + /* Create the fqs thread */
  1352 + fqs_task = kthread_run(rcu_torture_fqs, NULL,
  1353 + "rcu_torture_fqs");
  1354 + if (IS_ERR(fqs_task)) {
  1355 + firsterr = PTR_ERR(fqs_task);
  1356 + VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
  1357 + fqs_task = NULL;
1282 1358 goto unwind;
1283 1359 }
1284 1360 }
kernel/rcutree.c
... ... @@ -157,6 +157,24 @@
157 157 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
158 158  
159 159 /*
  160 + * Force a quiescent state for RCU BH.
  161 + */
  162 +void rcu_bh_force_quiescent_state(void)
  163 +{
  164 + force_quiescent_state(&rcu_bh_state, 0);
  165 +}
  166 +EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  167 +
  168 +/*
  169 + * Force a quiescent state for RCU-sched.
  170 + */
  171 +void rcu_sched_force_quiescent_state(void)
  172 +{
  173 + force_quiescent_state(&rcu_sched_state, 0);
  174 +}
  175 +EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  176 +
  177 +/*
160 178 * Does the CPU have callbacks ready to be invoked?
161 179 */
162 180 static int
kernel/rcutree_plugin.h
... ... @@ -62,6 +62,15 @@
62 62 EXPORT_SYMBOL_GPL(rcu_batches_completed);
63 63  
64 64 /*
  65 + * Force a quiescent state for preemptible RCU.
  66 + */
  67 +void rcu_force_quiescent_state(void)
  68 +{
  69 + force_quiescent_state(&rcu_preempt_state, 0);
  70 +}
  71 +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
  72 +
  73 +/*
65 74 * Record a preemptable-RCU quiescent state for the specified CPU. Note
66 75 * that this just means that the task currently running on the CPU is
67 76 * not in a quiescent state. There might be any number of tasks blocked
... ... @@ -711,6 +720,16 @@
711 720 return rcu_batches_completed_sched();
712 721 }
713 722 EXPORT_SYMBOL_GPL(rcu_batches_completed);
  723 +
  724 +/*
  725 + * Force a quiescent state for RCU, which, because there is no preemptible
  726 + * RCU, becomes the same as rcu-sched.
  727 + */
  728 +void rcu_force_quiescent_state(void)
  729 +{
  730 + rcu_sched_force_quiescent_state();
  731 +}
  732 +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
714 733  
715 734 /*
716 735 * Because preemptable RCU does not exist, we never have to check for