Commit 68e74568fbe5854952355e942acca51f138096d9
Committed by
Ingo Molnar
1 parent
4212823fb4
Exists in
master
and in
39 other branches
sched: convert struct cpupri_vec to cpumask_var_t.
Impact: stack usage reduction, (future) size reduction for large NR_CPUS. Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves space for small nr_cpu_ids but big CONFIG_NR_CPUS. The fact that cpupri_init is called both before and after the slab is available makes for an ugly parameter unfortunately. We also use cpumask_any_and to get rid of a temporary in cpupri_find. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 3 changed files with 38 additions and 15 deletions Side-by-side Diff
kernel/sched.c
... | ... | @@ -6792,6 +6792,8 @@ |
6792 | 6792 | |
6793 | 6793 | static void free_rootdomain(struct root_domain *rd) |
6794 | 6794 | { |
6795 | + cpupri_cleanup(&rd->cpupri); | |
6796 | + | |
6795 | 6797 | free_cpumask_var(rd->rto_mask); |
6796 | 6798 | free_cpumask_var(rd->online); |
6797 | 6799 | free_cpumask_var(rd->span); |
... | ... | @@ -6834,7 +6836,7 @@ |
6834 | 6836 | alloc_bootmem_cpumask_var(&def_root_domain.span); |
6835 | 6837 | alloc_bootmem_cpumask_var(&def_root_domain.online); |
6836 | 6838 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); |
6837 | - cpupri_init(&rd->cpupri); | |
6839 | + cpupri_init(&rd->cpupri, true); | |
6838 | 6840 | return 0; |
6839 | 6841 | } |
6840 | 6842 | |
6841 | 6843 | |
... | ... | @@ -6845,9 +6847,12 @@ |
6845 | 6847 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) |
6846 | 6848 | goto free_online; |
6847 | 6849 | |
6848 | - cpupri_init(&rd->cpupri); | |
6850 | + if (cpupri_init(&rd->cpupri, false) != 0) | |
6851 | + goto free_rto_mask; | |
6849 | 6852 | return 0; |
6850 | 6853 | |
6854 | +free_rto_mask: | |
6855 | + free_cpumask_var(rd->rto_mask); | |
6851 | 6856 | free_online: |
6852 | 6857 | free_cpumask_var(rd->online); |
6853 | 6858 | free_span: |
kernel/sched_cpupri.c
... | ... | @@ -67,24 +67,21 @@ |
67 | 67 | * Returns: (int)bool - CPUs were found |
68 | 68 | */ |
69 | 69 | int cpupri_find(struct cpupri *cp, struct task_struct *p, |
70 | - cpumask_t *lowest_mask) | |
70 | + struct cpumask *lowest_mask) | |
71 | 71 | { |
72 | 72 | int idx = 0; |
73 | 73 | int task_pri = convert_prio(p->prio); |
74 | 74 | |
75 | 75 | for_each_cpupri_active(cp->pri_active, idx) { |
76 | 76 | struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; |
77 | - cpumask_t mask; | |
78 | 77 | |
79 | 78 | if (idx >= task_pri) |
80 | 79 | break; |
81 | 80 | |
82 | - cpus_and(mask, p->cpus_allowed, vec->mask); | |
83 | - | |
84 | - if (cpus_empty(mask)) | |
81 | + if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) | |
85 | 82 | continue; |
86 | 83 | |
87 | - *lowest_mask = mask; | |
84 | + cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); | |
88 | 85 | return 1; |
89 | 86 | } |
90 | 87 | |
... | ... | @@ -126,7 +123,7 @@ |
126 | 123 | vec->count--; |
127 | 124 | if (!vec->count) |
128 | 125 | clear_bit(oldpri, cp->pri_active); |
129 | - cpu_clear(cpu, vec->mask); | |
126 | + cpumask_clear_cpu(cpu, vec->mask); | |
130 | 127 | |
131 | 128 | spin_unlock_irqrestore(&vec->lock, flags); |
132 | 129 | } |
... | ... | @@ -136,7 +133,7 @@ |
136 | 133 | |
137 | 134 | spin_lock_irqsave(&vec->lock, flags); |
138 | 135 | |
139 | - cpu_set(cpu, vec->mask); | |
136 | + cpumask_set_cpu(cpu, vec->mask); | |
140 | 137 | vec->count++; |
141 | 138 | if (vec->count == 1) |
142 | 139 | set_bit(newpri, cp->pri_active); |
143 | 140 | |
144 | 141 | |
... | ... | @@ -150,10 +147,11 @@ |
150 | 147 | /** |
151 | 148 | * cpupri_init - initialize the cpupri structure |
152 | 149 | * @cp: The cpupri context |
150 | + * @bootmem: true if allocations need to use bootmem | |
153 | 151 | * |
154 | - * Returns: (void) | |
152 | + * Returns: -ENOMEM if memory fails. | |
155 | 153 | */ |
156 | -void cpupri_init(struct cpupri *cp) | |
154 | +int cpupri_init(struct cpupri *cp, bool bootmem) | |
157 | 155 | { |
158 | 156 | int i; |
159 | 157 | |
160 | 158 | |
... | ... | @@ -164,10 +162,31 @@ |
164 | 162 | |
165 | 163 | spin_lock_init(&vec->lock); |
166 | 164 | vec->count = 0; |
167 | - cpus_clear(vec->mask); | |
165 | + if (bootmem) | |
166 | + alloc_bootmem_cpumask_var(&vec->mask); | |
167 | + else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) | |
168 | + goto cleanup; | |
168 | 169 | } |
169 | 170 | |
170 | 171 | for_each_possible_cpu(i) |
171 | 172 | cp->cpu_to_pri[i] = CPUPRI_INVALID; |
173 | + return 0; | |
174 | + | |
175 | +cleanup: | |
176 | + for (i--; i >= 0; i--) | |
177 | + free_cpumask_var(cp->pri_to_cpu[i].mask); | |
178 | + return -ENOMEM; | |
179 | +} | |
180 | + | |
181 | +/** | |
182 | + * cpupri_cleanup - clean up the cpupri structure | |
183 | + * @cp: The cpupri context | |
184 | + */ | |
185 | +void cpupri_cleanup(struct cpupri *cp) | |
186 | +{ | |
187 | + int i; | |
188 | + | |
189 | + for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) | |
190 | + free_cpumask_var(cp->pri_to_cpu[i].mask); | |
172 | 191 | } |
kernel/sched_cpupri.h
... | ... | @@ -14,7 +14,7 @@ |
14 | 14 | struct cpupri_vec { |
15 | 15 | spinlock_t lock; |
16 | 16 | int count; |
17 | - cpumask_t mask; | |
17 | + cpumask_var_t mask; | |
18 | 18 | }; |
19 | 19 | |
20 | 20 | struct cpupri { |
... | ... | @@ -27,7 +27,8 @@ |
27 | 27 | int cpupri_find(struct cpupri *cp, |
28 | 28 | struct task_struct *p, cpumask_t *lowest_mask); |
29 | 29 | void cpupri_set(struct cpupri *cp, int cpu, int pri); |
30 | -void cpupri_init(struct cpupri *cp); | |
30 | +int cpupri_init(struct cpupri *cp, bool bootmem); | |
31 | +void cpupri_cleanup(struct cpupri *cp); | |
31 | 32 | #else |
32 | 33 | #define cpupri_set(cp, cpu, pri) do { } while (0) |
33 | 34 | #define cpupri_init() do { } while (0) |