Commit c1ad41f1f7270c1956da13fa8fd59d8d5929d56e

Authored by Ingo Molnar
1 parent 38130ec087

Revert "sched/autogroup: Fix crash on reboot when autogroup is disabled"

This reverts commit 5258f386ea4e8454bc801fb443e8a4217da1947c,
because the underlying autogroups bug got fixed upstream in
a better way, via:

  fd8ef11730f1 Revert "sched, autogroup: Stop going ahead if autogroup is disabled"

Cc: Mike Galbraith <efault@gmx.de>
Cc: Yong Zhang <yong.zhang0@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 4 changed files with 147 additions and 14 deletions (side-by-side diff)

... ... @@ -1272,6 +1272,81 @@
1272 1272  
1273 1273 #endif
1274 1274  
  1275 +#ifdef CONFIG_SCHED_AUTOGROUP
  1276 +/*
  1277 + * Print out autogroup related information:
  1278 + */
  1279 +static int sched_autogroup_show(struct seq_file *m, void *v)
  1280 +{
  1281 + struct inode *inode = m->private;
  1282 + struct task_struct *p;
  1283 +
  1284 + p = get_proc_task(inode);
  1285 + if (!p)
  1286 + return -ESRCH;
  1287 + proc_sched_autogroup_show_task(p, m);
  1288 +
  1289 + put_task_struct(p);
  1290 +
  1291 + return 0;
  1292 +}
  1293 +
  1294 +static ssize_t
  1295 +sched_autogroup_write(struct file *file, const char __user *buf,
  1296 + size_t count, loff_t *offset)
  1297 +{
  1298 + struct inode *inode = file->f_path.dentry->d_inode;
  1299 + struct task_struct *p;
  1300 + char buffer[PROC_NUMBUF];
  1301 + int nice;
  1302 + int err;
  1303 +
  1304 + memset(buffer, 0, sizeof(buffer));
  1305 + if (count > sizeof(buffer) - 1)
  1306 + count = sizeof(buffer) - 1;
  1307 + if (copy_from_user(buffer, buf, count))
  1308 + return -EFAULT;
  1309 +
  1310 + err = kstrtoint(strstrip(buffer), 0, &nice);
  1311 + if (err < 0)
  1312 + return err;
  1313 +
  1314 + p = get_proc_task(inode);
  1315 + if (!p)
  1316 + return -ESRCH;
  1317 +
  1318 + err = proc_sched_autogroup_set_nice(p, nice);
  1319 + if (err)
  1320 + count = err;
  1321 +
  1322 + put_task_struct(p);
  1323 +
  1324 + return count;
  1325 +}
  1326 +
  1327 +static int sched_autogroup_open(struct inode *inode, struct file *filp)
  1328 +{
  1329 + int ret;
  1330 +
  1331 + ret = single_open(filp, sched_autogroup_show, NULL);
  1332 + if (!ret) {
  1333 + struct seq_file *m = filp->private_data;
  1334 +
  1335 + m->private = inode;
  1336 + }
  1337 + return ret;
  1338 +}
  1339 +
  1340 +static const struct file_operations proc_pid_sched_autogroup_operations = {
  1341 + .open = sched_autogroup_open,
  1342 + .read = seq_read,
  1343 + .write = sched_autogroup_write,
  1344 + .llseek = seq_lseek,
  1345 + .release = single_release,
  1346 +};
  1347 +
  1348 +#endif /* CONFIG_SCHED_AUTOGROUP */
  1349 +
1275 1350 static ssize_t comm_write(struct file *file, const char __user *buf,
1276 1351 size_t count, loff_t *offset)
1277 1352 {
... ... @@ -2582,6 +2657,9 @@
2582 2657 INF("limits", S_IRUGO, proc_pid_limits),
2583 2658 #ifdef CONFIG_SCHED_DEBUG
2584 2659 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
  2660 +#endif
  2661 +#ifdef CONFIG_SCHED_AUTOGROUP
  2662 + REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
2585 2663 #endif
2586 2664 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
2587 2665 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
kernel/sched/auto_group.c
... ... @@ -110,9 +110,6 @@
110 110  
111 111 bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
112 112 {
113   - if (!sysctl_sched_autogroup_enabled)
114   - return false;
115   -
116 113 if (tg != &root_task_group)
117 114 return false;
118 115  
119 116  
... ... @@ -146,11 +143,15 @@
146 143  
147 144 p->signal->autogroup = autogroup_kref_get(ag);
148 145  
  146 + if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
  147 + goto out;
  148 +
149 149 t = p;
150 150 do {
151 151 sched_move_task(t);
152 152 } while_each_thread(p, t);
153 153  
  154 +out:
154 155 unlock_task_sighand(p, &flags);
155 156 autogroup_kref_put(prev);
156 157 }
157 158  
... ... @@ -158,11 +159,8 @@
158 159 /* Allocates GFP_KERNEL, cannot be called under any spinlock */
159 160 void sched_autogroup_create_attach(struct task_struct *p)
160 161 {
161   - struct autogroup *ag;
  162 + struct autogroup *ag = autogroup_create();
162 163  
163   - if (!sysctl_sched_autogroup_enabled)
164   - return;
165   - ag = autogroup_create();
166 164 autogroup_move_group(p, ag);
167 165 /* drop extra reference added by autogroup_create() */
168 166 autogroup_kref_put(ag);
169 167  
... ... @@ -178,15 +176,11 @@
178 176  
179 177 void sched_autogroup_fork(struct signal_struct *sig)
180 178 {
181   - if (!sysctl_sched_autogroup_enabled)
182   - return;
183 179 sig->autogroup = autogroup_task_get(current);
184 180 }
185 181  
186 182 void sched_autogroup_exit(struct signal_struct *sig)
187 183 {
188   - if (!sysctl_sched_autogroup_enabled)
189   - return;
190 184 autogroup_kref_put(sig->autogroup);
191 185 }
192 186  
... ... @@ -198,6 +192,58 @@
198 192 }
199 193  
200 194 __setup("noautogroup", setup_autogroup);
  195 +
  196 +#ifdef CONFIG_PROC_FS
  197 +
  198 +int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
  199 +{
  200 + static unsigned long next = INITIAL_JIFFIES;
  201 + struct autogroup *ag;
  202 + int err;
  203 +
  204 + if (nice < -20 || nice > 19)
  205 + return -EINVAL;
  206 +
  207 + err = security_task_setnice(current, nice);
  208 + if (err)
  209 + return err;
  210 +
  211 + if (nice < 0 && !can_nice(current, nice))
  212 + return -EPERM;
  213 +
  214 + /* this is a heavy operation taking global locks.. */
  215 + if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
  216 + return -EAGAIN;
  217 +
  218 + next = HZ / 10 + jiffies;
  219 + ag = autogroup_task_get(p);
  220 +
  221 + down_write(&ag->lock);
  222 + err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
  223 + if (!err)
  224 + ag->nice = nice;
  225 + up_write(&ag->lock);
  226 +
  227 + autogroup_kref_put(ag);
  228 +
  229 + return err;
  230 +}
  231 +
  232 +void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
  233 +{
  234 + struct autogroup *ag = autogroup_task_get(p);
  235 +
  236 + if (!task_group_is_autogroup(ag->tg))
  237 + goto out;
  238 +
  239 + down_read(&ag->lock);
  240 + seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
  241 + up_read(&ag->lock);
  242 +
  243 +out:
  244 + autogroup_kref_put(ag);
  245 +}
  246 +#endif /* CONFIG_PROC_FS */
201 247  
202 248 #ifdef CONFIG_SCHED_DEBUG
203 249 int autogroup_path(struct task_group *tg, char *buf, int buflen)
kernel/sched/auto_group.h
... ... @@ -4,6 +4,11 @@
4 4 #include <linux/rwsem.h>
5 5  
6 6 struct autogroup {
  7 + /*
  8 + * reference doesn't mean how many thread attach to this
  9 + * autogroup now. It just stands for the number of task
  10 + * could use this autogroup.
  11 + */
7 12 struct kref kref;
8 13 struct task_group *tg;
9 14 struct rw_semaphore lock;
... ... @@ -24,7 +29,9 @@
24 29 static inline struct task_group *
25 30 autogroup_task_group(struct task_struct *p, struct task_group *tg)
26 31 {
27   - if (task_wants_autogroup(p, tg))
  32 + int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
  33 +
  34 + if (enabled && task_wants_autogroup(p, tg))
28 35 return p->signal->autogroup->tg;
29 36  
30 37 return tg;
... ... @@ -367,8 +367,10 @@
367 367 .procname = "sched_autogroup_enabled",
368 368 .data = &sysctl_sched_autogroup_enabled,
369 369 .maxlen = sizeof(unsigned int),
370   - .mode = 0444,
371   - .proc_handler = proc_dointvec,
  370 + .mode = 0644,
  371 + .proc_handler = proc_dointvec_minmax,
  372 + .extra1 = &zero,
  373 + .extra2 = &one,
372 374 },
373 375 #endif
374 376 #ifdef CONFIG_CFS_BANDWIDTH