Commit 32439700fe1c0fc3c2d3f2aedd3ad6707c88b8ba

Authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Various fixlets, mostly related to the (root-only) SCHED_DEADLINE
  policy, but also a hotplug bug fix and a fix for a NR_CPUS related
  overallocation bug causing a suspend/resume regression"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix hotplug vs. set_cpus_allowed_ptr()
  sched/cpupri: Replace NR_CPUS arrays
  sched/deadline: Replace NR_CPUS arrays
  sched/deadline: Restrict user params max value to 2^63 ns
  sched/deadline: Change sched_getparam() behaviour vs SCHED_DEADLINE
  sched: Disallow sched_attr::sched_policy < 0
  sched: Make sched_setattr() correctly return -EFBIG

Showing 6 changed files

kernel/cpu.c
... ... @@ -726,10 +726,12 @@
726 726  
727 727 void set_cpu_online(unsigned int cpu, bool online)
728 728 {
729   - if (online)
  729 + if (online) {
730 730 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
731   - else
  731 + cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
  732 + } else {
732 733 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
  734 + }
733 735 }
734 736  
735 737 void set_cpu_active(unsigned int cpu, bool active)
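For context: during bring-up there was a window in which a CPU was already set in cpu_online_mask but not yet in cpu_active_mask, and set_cpus_allowed_ptr() only migrates tasks onto active CPUs. Setting the active bit together with the online bit closes that window, and it also makes the CPU_STARTING case in the scheduler's hotplug notifier redundant (removed in a later hunk below). A simplified sketch of the destination selection that the active bit gates, loosely following kernel/sched/core.c of this era (not verbatim):

    /* set_cpus_allowed_ptr(), simplified: the migration target is
     * picked from the intersection of the new mask and the *active*
     * mask, so an online-but-not-yet-active CPU can never be chosen.
     */
    dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
    if (dest_cpu >= nr_cpu_ids)
            return -EINVAL;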
kernel/sched/core.c
... ... @@ -3195,17 +3195,40 @@
3195 3195 * We ask for the deadline not being zero, and greater or equal
3196 3196 * than the runtime, as well as the period of being zero or
3197 3197 * greater than deadline. Furthermore, we have to be sure that
3198   - * user parameters are above the internal resolution (1us); we
3199   - * check sched_runtime only since it is always the smaller one.
  3198 + * user parameters are above the internal resolution of 1us (we
  3199 + * check sched_runtime only since it is always the smaller one) and
  3200 + * below 2^63 ns (we have to check both sched_deadline and
  3201 + * sched_period, as the latter can be zero).
3200 3202 */
3201 3203 static bool
3202 3204 __checkparam_dl(const struct sched_attr *attr)
3203 3205 {
3204   - return attr && attr->sched_deadline != 0 &&
3205   - (attr->sched_period == 0 ||
3206   - (s64)(attr->sched_period - attr->sched_deadline) >= 0) &&
3207   - (s64)(attr->sched_deadline - attr->sched_runtime ) >= 0 &&
3208   - attr->sched_runtime >= (2 << (DL_SCALE - 1));
  3206 + /* deadline != 0 */
  3207 + if (attr->sched_deadline == 0)
  3208 + return false;
  3209 +
  3210 + /*
  3211 + * Since we truncate DL_SCALE bits, make sure we're at least
  3212 + * that big.
  3213 + */
  3214 + if (attr->sched_runtime < (1ULL << DL_SCALE))
  3215 + return false;
  3216 +
  3217 + /*
  3218 + * Since we use the MSB for wrap-around and sign issues, make
  3219 + * sure it's not set (mind that period can be equal to zero).
  3220 + */
  3221 + if (attr->sched_deadline & (1ULL << 63) ||
  3222 + attr->sched_period & (1ULL << 63))
  3223 + return false;
  3224 +
  3225 + /* runtime <= deadline <= period (if period != 0) */
  3226 + if ((attr->sched_period != 0 &&
  3227 + attr->sched_period < attr->sched_deadline) ||
  3228 + attr->sched_deadline < attr->sched_runtime)
  3229 + return false;
  3230 +
  3231 + return true;
3209 3232 }
3210 3233  
3211 3234 /*
3212 3235  
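A worked example of the new bounds (assuming DL_SCALE == 10, its value in kernel/sched/sched.h at this point, so the floor is 1 << 10 = 1024 ns, i.e. the advertised ~1us):

    /* Values that pass __checkparam_dl(): */
    struct sched_attr attr = {
            .size           = sizeof(attr),
            .sched_policy   = SCHED_DEADLINE,
            .sched_runtime  =  10 * 1000 * 1000ULL,  /* 10 ms >= 1 << DL_SCALE */
            .sched_deadline =  30 * 1000 * 1000ULL,  /* runtime <= deadline    */
            .sched_period   = 100 * 1000 * 1000ULL,  /* deadline <= period     */
    };
    /* Values that are now rejected:
     *   .sched_runtime  = 512          below the 1024 ns resolution
     *   .sched_deadline = 1ULL << 63   MSB set: wrap-around/sign hazard
     *   .sched_period   = 1ULL << 63   checked even though 0 is allowed
     */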
... ... @@ -3658,9 +3681,13 @@
3658 3681 if (!uattr || pid < 0 || flags)
3659 3682 return -EINVAL;
3660 3683  
3661   - if (sched_copy_attr(uattr, &attr))
3662   - return -EFAULT;
  3684 + retval = sched_copy_attr(uattr, &attr);
  3685 + if (retval)
  3686 + return retval;
3663 3687  
  3688 + if (attr.sched_policy < 0)
  3689 + return -EINVAL;
  3690 +
3664 3691 rcu_read_lock();
3665 3692 retval = -ESRCH;
3666 3693 p = find_process_by_pid(pid);
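Two user-visible tweaks land in this hunk: sched_copy_attr()'s own error code, e.g. -EFBIG for an over-sized sched_attr whose extra bytes are non-zero, now reaches the caller instead of being flattened to -EFAULT, and a negative sched_policy is meant to be rejected before the task lookup. A hedged user-space sketch of the size case (glibc had no wrapper at the time, so a raw syscall and a local struct sched_attr definition are assumed):

    /* Claim more bytes than the kernel's struct and make one of the
     * extra bytes non-zero; sched_copy_attr() refuses this, and the
     * fix forwards its error instead of a blanket EFAULT.
     */
    struct { struct sched_attr attr; char tail[64]; } big = { 0 };
    big.attr.size = sizeof(big);
    big.tail[0] = 1;
    if (syscall(__NR_sched_setattr, 0, &big, 0) == -1)
            perror("sched_setattr");  /* reports the size error, not EFAULT */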
... ... @@ -3709,7 +3736,7 @@
3709 3736 */
3710 3737 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3711 3738 {
3712   - struct sched_param lp;
  3739 + struct sched_param lp = { .sched_priority = 0 };
3713 3740 struct task_struct *p;
3714 3741 int retval;
3715 3742  
... ... @@ -3726,11 +3753,8 @@
3726 3753 if (retval)
3727 3754 goto out_unlock;
3728 3755  
3729   - if (task_has_dl_policy(p)) {
3730   - retval = -EINVAL;
3731   - goto out_unlock;
3732   - }
3733   - lp.sched_priority = p->rt_priority;
  3756 + if (task_has_rt_policy(p))
  3757 + lp.sched_priority = p->rt_priority;
3734 3758 rcu_read_unlock();
3735 3759  
3736 3760 /*
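This is the "sched_getparam() behaviour vs SCHED_DEADLINE" change from the pull message: querying a deadline task no longer fails. What callers see, using the glibc wrapper (a small sketch, not from the commit):

    struct sched_param sp;
    /* For a SCHED_DEADLINE task this used to fail with EINVAL; it now
     * succeeds and reports 0, just as for SCHED_OTHER tasks, since only
     * rt-policy tasks have a meaningful sched_priority.
     */
    if (sched_getparam(pid, &sp) == 0)
            printf("sched_priority = %d\n", sp.sched_priority);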
... ... @@ -5052,7 +5076,6 @@
5052 5076 unsigned long action, void *hcpu)
5053 5077 {
5054 5078 switch (action & ~CPU_TASKS_FROZEN) {
5055   - case CPU_STARTING:
5056 5079 case CPU_DOWN_FAILED:
5057 5080 set_cpu_active((long)hcpu, true);
5058 5081 return NOTIFY_OK;
kernel/sched/cpudeadline.c
... ... @@ -13,6 +13,7 @@
13 13  
14 14 #include <linux/gfp.h>
15 15 #include <linux/kernel.h>
  16 +#include <linux/slab.h>
16 17 #include "cpudeadline.h"
17 18  
18 19 static inline int parent(int i)
... ... @@ -39,8 +40,10 @@
39 40 {
40 41 int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
41 42  
42   - swap(cp->elements[a], cp->elements[b]);
43   - swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]);
  43 + swap(cp->elements[a].cpu, cp->elements[b].cpu);
  44 + swap(cp->elements[a].dl , cp->elements[b].dl );
  45 +
  46 + swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
44 47 }
45 48  
46 49 static void cpudl_heapify(struct cpudl *cp, int idx)
... ... @@ -140,7 +143,7 @@
140 143 WARN_ON(!cpu_present(cpu));
141 144  
142 145 raw_spin_lock_irqsave(&cp->lock, flags);
143   - old_idx = cp->cpu_to_idx[cpu];
  146 + old_idx = cp->elements[cpu].idx;
144 147 if (!is_valid) {
145 148 /* remove item */
146 149 if (old_idx == IDX_INVALID) {
... ... @@ -155,8 +158,8 @@
155 158 cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
156 159 cp->elements[old_idx].cpu = new_cpu;
157 160 cp->size--;
158   - cp->cpu_to_idx[new_cpu] = old_idx;
159   - cp->cpu_to_idx[cpu] = IDX_INVALID;
  161 + cp->elements[new_cpu].idx = old_idx;
  162 + cp->elements[cpu].idx = IDX_INVALID;
160 163 while (old_idx > 0 && dl_time_before(
161 164 cp->elements[parent(old_idx)].dl,
162 165 cp->elements[old_idx].dl)) {
... ... @@ -173,7 +176,7 @@
173 176 cp->size++;
174 177 cp->elements[cp->size - 1].dl = 0;
175 178 cp->elements[cp->size - 1].cpu = cpu;
176   - cp->cpu_to_idx[cpu] = cp->size - 1;
  179 + cp->elements[cpu].idx = cp->size - 1;
177 180 cpudl_change_key(cp, cp->size - 1, dl);
178 181 cpumask_clear_cpu(cpu, cp->free_cpus);
179 182 } else {
180 183  
... ... @@ -195,10 +198,21 @@
195 198 memset(cp, 0, sizeof(*cp));
196 199 raw_spin_lock_init(&cp->lock);
197 200 cp->size = 0;
198   - for (i = 0; i < NR_CPUS; i++)
199   - cp->cpu_to_idx[i] = IDX_INVALID;
200   - if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
  201 +
  202 + cp->elements = kcalloc(nr_cpu_ids,
  203 + sizeof(struct cpudl_item),
  204 + GFP_KERNEL);
  205 + if (!cp->elements)
201 206 return -ENOMEM;
  207 +
  208 + if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
  209 + kfree(cp->elements);
  210 + return -ENOMEM;
  211 + }
  212 +
  213 + for_each_possible_cpu(i)
  214 + cp->elements[i].idx = IDX_INVALID;
  215 +
202 216 cpumask_setall(cp->free_cpus);
203 217  
204 218 return 0;
... ... @@ -211,5 +225,6 @@
211 225 void cpudl_cleanup(struct cpudl *cp)
212 226 {
213 227 free_cpumask_var(cp->free_cpus);
  228 + kfree(cp->elements);
214 229 }
kernel/sched/cpudeadline.h
... ... @@ -5,17 +5,17 @@
5 5  
6 6 #define IDX_INVALID -1
7 7  
8   -struct array_item {
  8 +struct cpudl_item {
9 9 u64 dl;
10 10 int cpu;
  11 + int idx;
11 12 };
12 13  
13 14 struct cpudl {
14 15 raw_spinlock_t lock;
15 16 int size;
16   - int cpu_to_idx[NR_CPUS];
17   - struct array_item elements[NR_CPUS];
18 17 cpumask_var_t free_cpus;
  18 + struct cpudl_item *elements;
19 19 };
20 20  
21 21  
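Rough arithmetic on what the two "Replace NR_CPUS arrays" patches save (an estimate, assuming 8-byte alignment): the old struct cpudl embedded both an int cpu_to_idx[NR_CPUS] and a struct array_item elements[NR_CPUS], while the new struct cpudl_item folds idx in and is allocated only for the CPUs that can actually exist:

    old: NR_CPUS * (sizeof(int) + sizeof(struct array_item))
       = 4096 * (4 + 16)  = 80 KiB     with CONFIG_NR_CPUS=4096
    new: nr_cpu_ids * sizeof(struct cpudl_item)
       = 4 * 16           = 64 bytes   on a 4-CPU machine

Note that elements[] is now indexed two ways: by heap position for .dl/.cpu and by CPU number for .idx, which is why cpudl_exchange() above swaps the heap fields member by member and then fixes up the .idx slots keyed by CPU.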
kernel/sched/cpupri.c
... ... @@ -30,6 +30,7 @@
30 30 #include <linux/gfp.h>
31 31 #include <linux/sched.h>
32 32 #include <linux/sched/rt.h>
  33 +#include <linux/slab.h>
33 34 #include "cpupri.h"
34 35  
35 36 /* Convert between a 140 based task->prio, and our 102 based cpupri */
36 37  
... ... @@ -218,8 +219,13 @@
218 219 goto cleanup;
219 220 }
220 221  
  222 + cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
  223 + if (!cp->cpu_to_pri)
  224 + goto cleanup;
  225 +
221 226 for_each_possible_cpu(i)
222 227 cp->cpu_to_pri[i] = CPUPRI_INVALID;
  228 +
223 229 return 0;
224 230  
225 231 cleanup:
... ... @@ -236,6 +242,7 @@
236 242 {
237 243 int i;
238 244  
  245 + kfree(cp->cpu_to_pri);
239 246 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
240 247 free_cpumask_var(cp->pri_to_cpu[i].mask);
241 248 }
kernel/sched/cpupri.h
... ... @@ -17,7 +17,7 @@
17 17  
18 18 struct cpupri {
19 19 struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
20   - int cpu_to_pri[NR_CPUS];
  20 + int *cpu_to_pri;
21 21 };
22 22  
23 23 #ifdef CONFIG_SMP