Commit 73d0a4b107d58908305f272bfae9bd17f74a2c81
1 parent
aa85ea5b89
Exists in
master
and in
20 other branches
cpumask: convert rcutorture.c
We're getting rid of cpumasks on the stack. Simply change tmp_mask to a global, and allocate it in rcu_torture_init().

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@freedesktop.org>
Showing 1 changed file with 17 additions and 8 deletions (side-by-side diff)
kernel/rcutorture.c
... | ... | @@ -126,6 +126,7 @@ |
126 | 126 | static atomic_t n_rcu_torture_error; |
127 | 127 | static long n_rcu_torture_timers = 0; |
128 | 128 | static struct list_head rcu_torture_removed; |
129 | +static cpumask_var_t shuffle_tmp_mask; | |
129 | 130 | |
130 | 131 | static int stutter_pause_test = 0; |
131 | 132 | |
132 | 133 | |
... | ... | @@ -889,10 +890,9 @@ |
889 | 890 | */ |
890 | 891 | static void rcu_torture_shuffle_tasks(void) |
891 | 892 | { |
892 | - cpumask_t tmp_mask; | |
893 | 893 | int i; |
894 | 894 | |
895 | - cpus_setall(tmp_mask); | |
895 | + cpumask_setall(shuffle_tmp_mask); | |
896 | 896 | get_online_cpus(); |
897 | 897 | |
898 | 898 | /* No point in shuffling if there is only one online CPU (ex: UP) */ |
899 | 899 | |
900 | 900 | |
901 | 901 | |
902 | 902 | |
903 | 903 | |
... | ... | @@ -902,29 +902,29 @@ |
902 | 902 | } |
903 | 903 | |
904 | 904 | if (rcu_idle_cpu != -1) |
905 | - cpu_clear(rcu_idle_cpu, tmp_mask); | |
905 | + cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask); | |
906 | 906 | |
907 | - set_cpus_allowed_ptr(current, &tmp_mask); | |
907 | + set_cpus_allowed_ptr(current, shuffle_tmp_mask); | |
908 | 908 | |
909 | 909 | if (reader_tasks) { |
910 | 910 | for (i = 0; i < nrealreaders; i++) |
911 | 911 | if (reader_tasks[i]) |
912 | 912 | set_cpus_allowed_ptr(reader_tasks[i], |
913 | - &tmp_mask); | |
913 | + shuffle_tmp_mask); | |
914 | 914 | } |
915 | 915 | |
916 | 916 | if (fakewriter_tasks) { |
917 | 917 | for (i = 0; i < nfakewriters; i++) |
918 | 918 | if (fakewriter_tasks[i]) |
919 | 919 | set_cpus_allowed_ptr(fakewriter_tasks[i], |
920 | - &tmp_mask); | |
920 | + shuffle_tmp_mask); | |
921 | 921 | } |
922 | 922 | |
923 | 923 | if (writer_task) |
924 | - set_cpus_allowed_ptr(writer_task, &tmp_mask); | |
924 | + set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask); | |
925 | 925 | |
926 | 926 | if (stats_task) |
927 | - set_cpus_allowed_ptr(stats_task, &tmp_mask); | |
927 | + set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask); | |
928 | 928 | |
929 | 929 | if (rcu_idle_cpu == -1) |
930 | 930 | rcu_idle_cpu = num_online_cpus() - 1; |
... | ... | @@ -1012,6 +1012,7 @@ |
1012 | 1012 | if (shuffler_task) { |
1013 | 1013 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); |
1014 | 1014 | kthread_stop(shuffler_task); |
1015 | + free_cpumask_var(shuffle_tmp_mask); | |
1015 | 1016 | } |
1016 | 1017 | shuffler_task = NULL; |
1017 | 1018 | |
1018 | 1019 | |
... | ... | @@ -1190,10 +1191,18 @@ |
1190 | 1191 | } |
1191 | 1192 | if (test_no_idle_hz) { |
1192 | 1193 | rcu_idle_cpu = num_online_cpus() - 1; |
1194 | + | |
1195 | + if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) { | |
1196 | + firsterr = -ENOMEM; | |
1197 | + VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask"); | |
1198 | + goto unwind; | |
1199 | + } | |
1200 | + | |
1193 | 1201 | /* Create the shuffler thread */ |
1194 | 1202 | shuffler_task = kthread_run(rcu_torture_shuffle, NULL, |
1195 | 1203 | "rcu_torture_shuffle"); |
1196 | 1204 | if (IS_ERR(shuffler_task)) { |
1205 | + free_cpumask_var(shuffle_tmp_mask); | |
1197 | 1206 | firsterr = PTR_ERR(shuffler_task); |
1198 | 1207 | VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); |
1199 | 1208 | shuffler_task = NULL; |