kernel/padata.c

// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
        int cpu, target_cpu;

        target_cpu = cpumask_first(pd->cpumask.pcpu);
        for (cpu = 0; cpu < cpu_index; cpu++)
                target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

        return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
        unsigned int seq_nr;
        int cpu_index;

        /*
         * Hash the sequence numbers to the cpus by taking
         * seq_nr modulo the number of cpus in use.
         */

        seq_nr = atomic_inc_return(&pd->seq_nr);
        cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

        return padata_index_to_cpu(pd, cpu_index);
}
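
/*
 * Worked example for orientation: with a parallel cpumask covering cpus
 * {0, 2, 5}, successive sequence numbers 0, 1, 2, 3, ... hash to cpu_index
 * 0, 1, 2, 0, ... and thus to cpus 0, 2, 5, 0, ... This deterministic
 * round-robin is what padata_get_next() below relies on when it restores
 * the submission order.
 */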

static void padata_parallel_worker(struct work_struct *parallel_work)
{
        struct padata_parallel_queue *pqueue;
        LIST_HEAD(local_list);

        local_bh_disable();
        pqueue = container_of(parallel_work,
                              struct padata_parallel_queue, work);

        spin_lock(&pqueue->parallel.lock);
        list_replace_init(&pqueue->parallel.list, &local_list);
        spin_unlock(&pqueue->parallel.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->parallel(padata);
        }

        local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
                       struct padata_priv *padata, int cb_cpu)
{
        int target_cpu, err;
        struct padata_parallel_queue *queue;
        struct parallel_data *pd;

        rcu_read_lock_bh();

        pd = rcu_dereference_bh(pinst->pd);

        err = -EINVAL;
        if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
                goto out;

        if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
                goto out;

        err = -EBUSY;
        if ((pinst->flags & PADATA_RESET))
                goto out;

        if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
                goto out;

        err = 0;
        atomic_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = cb_cpu;

        target_cpu = padata_cpu_hash(pd);
        padata->cpu = target_cpu;
        queue = per_cpu_ptr(pd->pqueue, target_cpu);

        spin_lock(&queue->parallel.lock);
        list_add_tail(&padata->list, &queue->parallel.list);
        spin_unlock(&queue->parallel.lock);

        queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
        rcu_read_unlock_bh();

        return err;
}
EXPORT_SYMBOL(padata_do_parallel);
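
/*
 * Submission sketch (illustrative only; "struct my_request", "my_parallel"
 * and "my_serial" are hypothetical caller-side names, not padata API): the
 * caller embeds a struct padata_priv in its own request, fills in the two
 * callbacks and submits the object.
 *
 *      struct my_request {
 *              struct padata_priv padata;      // must be embedded
 *              // ... caller's payload ...
 *      };
 *
 *      req->padata.parallel = my_parallel;
 *      req->padata.serial = my_serial;
 *      err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 *      if (err)
 *              // fall back to processing the request synchronously
 */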

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
        int cpu, num_cpus;
        unsigned int next_nr, next_index;
        struct padata_parallel_queue *next_queue;
        struct padata_priv *padata;
        struct padata_list *reorder;

        num_cpus = cpumask_weight(pd->cpumask.pcpu);

        /*
         * Calculate the percpu reorder queue and the sequence
         * number of the next object.
         */
        next_nr = pd->processed;
        next_index = next_nr % num_cpus;
        cpu = padata_index_to_cpu(pd, next_index);
        next_queue = per_cpu_ptr(pd->pqueue, cpu);

        reorder = &next_queue->reorder;

        spin_lock(&reorder->lock);
        if (!list_empty(&reorder->list)) {
                padata = list_entry(reorder->list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);
                atomic_dec(&pd->reorder_objects);

                pd->processed++;

                spin_unlock(&reorder->lock);
                goto out;
        }
        spin_unlock(&reorder->lock);

        if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
                padata = ERR_PTR(-ENODATA);
                goto out;
        }

        padata = ERR_PTR(-EINPROGRESS);
out:
        return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
        int cb_cpu;
        struct padata_priv *padata;
        struct padata_serial_queue *squeue;
        struct padata_instance *pinst = pd->pinst;

        /*
         * We need to ensure that only one cpu can work on dequeueing of
         * the reorder queue at a time. Calculating in which percpu reorder
         * queue the next object will arrive takes some time. A spinlock
         * would be highly contended. Also it is not clear in which order
         * the objects arrive at the reorder queues. So a cpu could wait to
         * get the lock just to notice that there is nothing to do at the
         * moment. Therefore we use a trylock and let the holder of the lock
         * care for all the objects enqueued during the holdtime of the lock.
         */
        if (!spin_trylock_bh(&pd->lock))
                return;

        while (1) {
                padata = padata_get_next(pd);

                /*
                 * If the next object that needs serialization is parallel
                 * processed by another cpu and is still on its way to the
                 * cpu's reorder queue, nothing to do for now.
                 */
                if (PTR_ERR(padata) == -EINPROGRESS)
                        break;

                /*
                 * This cpu has to do the parallel processing of the next
                 * object. It's waiting in the cpu's parallelization queue,
                 * so exit immediately.
                 */
                if (PTR_ERR(padata) == -ENODATA) {
                        del_timer(&pd->timer);
                        spin_unlock_bh(&pd->lock);
                        return;
                }

                cb_cpu = padata->cb_cpu;
                squeue = per_cpu_ptr(pd->squeue, cb_cpu);

                spin_lock(&squeue->serial.lock);
                list_add_tail(&padata->list, &squeue->serial.list);
                spin_unlock(&squeue->serial.lock);

                queue_work_on(cb_cpu, pinst->wq, &squeue->work);
        }

        spin_unlock_bh(&pd->lock);

        /*
         * The next object that needs serialization might have arrived at
         * the reorder queues in the meantime, we will be called again
         * from the timer function if no one else cares for it.
         */
        if (atomic_read(&pd->reorder_objects)
                        && !(pinst->flags & PADATA_RESET))
                mod_timer(&pd->timer, jiffies + HZ);
        else
                del_timer(&pd->timer);
}
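
/*
 * For orientation, an object submitted via padata_do_parallel() travels
 * through three percpu lists:
 *
 *   pqueue->parallel  --padata_parallel_worker()--> padata->parallel()
 *   -> padata_do_serial() enqueues it on pqueue->reorder of padata->cpu
 *   -> padata_reorder() dequeues the objects in submission order
 *   -> squeue->serial --padata_serial_worker()--> padata->serial()
 *      on the requested cb_cpu.
 */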

static void invoke_padata_reorder(struct work_struct *work)
{
        struct padata_parallel_queue *pqueue;
        struct parallel_data *pd;

        local_bh_disable();
        pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
        pd = pqueue->pd;
        padata_reorder(pd);
        local_bh_enable();
}

static void padata_reorder_timer(struct timer_list *t)
{
        struct parallel_data *pd = from_timer(pd, t, timer);
        unsigned int weight;
        int target_cpu, cpu;

        cpu = get_cpu();

        /* We don't lock pd here to not interfere with parallel processing
         * padata_reorder() calls on other CPUs. We just need any CPU out of
         * the cpumask.pcpu set. It would be nice if it's the right one but
         * it doesn't matter if we're off to the next one by using an outdated
         * pd->processed value.
         */
        weight = cpumask_weight(pd->cpumask.pcpu);
        target_cpu = padata_index_to_cpu(pd, pd->processed % weight);

        /* ensure to call the reorder callback on the correct CPU */
        if (cpu != target_cpu) {
                struct padata_parallel_queue *pqueue;
                struct padata_instance *pinst;

                /* The timer function is serialized wrt itself -- no locking
                 * needed.
                 */
                pinst = pd->pinst;
                pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
                queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
        } else {
                padata_reorder(pd);
        }

        put_cpu();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
        struct padata_serial_queue *squeue;
        struct parallel_data *pd;
        LIST_HEAD(local_list);

        local_bh_disable();
        squeue = container_of(serial_work, struct padata_serial_queue, work);
        pd = squeue->pd;

        spin_lock(&squeue->serial.lock);
        list_replace_init(&squeue->serial.list, &local_list);
        spin_unlock(&squeue->serial.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->serial(padata);
                atomic_dec(&pd->refcnt);
        }
        local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
        int cpu;
        struct padata_parallel_queue *pqueue;
        struct parallel_data *pd;
        int reorder_via_wq = 0;

        pd = padata->pd;

        cpu = get_cpu();

        /* We need to run on the same CPU padata_do_parallel(.., padata, ..)
         * was called on -- or, at least, enqueue the padata object into the
         * correct per-cpu queue.
         */
        if (cpu != padata->cpu) {
                reorder_via_wq = 1;
                cpu = padata->cpu;
        }

        pqueue = per_cpu_ptr(pd->pqueue, cpu);

        spin_lock(&pqueue->reorder.lock);
        atomic_inc(&pd->reorder_objects);
        list_add_tail(&padata->list, &pqueue->reorder.list);
        spin_unlock(&pqueue->reorder.lock);

        put_cpu();

        /* If we're running on the wrong CPU, call padata_reorder() via a
         * kernel worker.
         */
        if (reorder_via_wq)
                queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
        else
                padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
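
/*
 * Callback pairing sketch (illustrative, continuing the hypothetical
 * my_parallel/my_serial example above): every ->parallel() invocation ends
 * by handing the object back via padata_do_serial(), and ->serial() later
 * sees the objects in exactly the order they were submitted.
 *
 *      static void my_parallel(struct padata_priv *padata)
 *      {
 *              do_expensive_work(padata);      // hypothetical helper
 *              padata_do_serial(padata);
 *      }
 *
 *      static void my_serial(struct padata_priv *padata)
 *      {
 *              complete_request(padata);       // runs in submission order
 *      }
 */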

static int padata_setup_cpumasks(struct parallel_data *pd,
                                 const struct cpumask *pcpumask,
                                 const struct cpumask *cbcpumask)
{
        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pd->cpumask.pcpu);
                return -ENOMEM;
        }

        cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
        return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
        INIT_LIST_HEAD(&pd_list->list);
        spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
        int cpu;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                squeue->pd = pd;
                __padata_list_init(&squeue->serial);
                INIT_WORK(&squeue->work, padata_serial_worker);
        }
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
        int cpu_index, cpu;
        struct padata_parallel_queue *pqueue;

        cpu_index = 0;
        for_each_possible_cpu(cpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);

                if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
                        pqueue->cpu_index = -1;
                        continue;
                }

                pqueue->pd = pd;
                pqueue->cpu_index = cpu_index;
                cpu_index++;

                __padata_list_init(&pqueue->reorder);
                __padata_list_init(&pqueue->parallel);
                INIT_WORK(&pqueue->work, padata_parallel_worker);
                INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
                atomic_set(&pqueue->num_obj, 0);
        }
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
                                             const struct cpumask *pcpumask,
                                             const struct cpumask *cbcpumask)
{
        struct parallel_data *pd;

        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
                goto err;

        pd->pqueue = alloc_percpu(struct padata_parallel_queue);
        if (!pd->pqueue)
                goto err_free_pd;

        pd->squeue = alloc_percpu(struct padata_serial_queue);
        if (!pd->squeue)
                goto err_free_pqueue;
        if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
                goto err_free_squeue;

        padata_init_pqueues(pd);
        padata_init_squeues(pd);
        timer_setup(&pd->timer, padata_reorder_timer, 0);
        atomic_set(&pd->seq_nr, -1);
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
        pd->pinst = pinst;
        spin_lock_init(&pd->lock);

        return pd;

err_free_squeue:
        free_percpu(pd->squeue);
err_free_pqueue:
        free_percpu(pd->pqueue);
err_free_pd:
        kfree(pd);
err:
        return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
        free_cpumask_var(pd->cpumask.pcpu);
        free_cpumask_var(pd->cpumask.cbcpu);
        free_percpu(pd->pqueue);
        free_percpu(pd->squeue);
        kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
        int cpu;
        struct padata_parallel_queue *pqueue;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);
                flush_work(&pqueue->work);
        }

        del_timer_sync(&pd->timer);

        if (atomic_read(&pd->reorder_objects))
                padata_reorder(pd);

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                flush_work(&squeue->work);
        }

        BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
        pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
        if (!(pinst->flags & PADATA_INIT))
                return;

        pinst->flags &= ~PADATA_INIT;

        synchronize_rcu();

        get_online_cpus();
        padata_flush_queues(pinst->pd);
        put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
                           struct parallel_data *pd_new)
{
        struct parallel_data *pd_old = pinst->pd;
        int notification_mask = 0;

        pinst->flags |= PADATA_RESET;

        rcu_assign_pointer(pinst->pd, pd_new);

        synchronize_rcu();

        if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
                notification_mask |= PADATA_CPU_PARALLEL;
        if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
                notification_mask |= PADATA_CPU_SERIAL;

        padata_flush_queues(pd_old);
        padata_free_pd(pd_old);

        if (notification_mask)
                blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
                                             notification_mask,
                                             &pd_new->cpumask);

        pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
                                     struct notifier_block *nblock)
{
        return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
                                                nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to data instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
                                       struct notifier_block *nblock)
{
        return blocking_notifier_chain_unregister(
                &pinst->cpumask_change_notifier,
                nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
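
/*
 * Notifier usage sketch (illustrative; "my_cpumask_change" and
 * "resize_percpu_state" are hypothetical caller-side names). The chain
 * fires from padata_replace() after the new parallel_data is in place;
 * the void pointer carries the new struct padata_cpumask pair.
 *
 *      static int my_cpumask_change(struct notifier_block *self,
 *                                   unsigned long val, void *data)
 *      {
 *              struct padata_cpumask *new_masks = data;
 *
 *              if (val & PADATA_CPU_PARALLEL)
 *                      resize_percpu_state(new_masks->pcpu);
 *              return 0;
 *      }
 *
 *      static struct notifier_block my_nblock = {
 *              .notifier_call = my_cpumask_change,
 *      };
 *
 *      padata_register_cpumask_notifier(pinst, &my_nblock);
 */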

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
                                    const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask)) {
                pinst->flags |= PADATA_INVALID;
                return false;
        }

        pinst->flags &= ~PADATA_INVALID;
        return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
                                 cpumask_var_t pcpumask,
                                 cpumask_var_t cbcpumask)
{
        int valid;
        struct parallel_data *pd;

        valid = padata_validate_cpumask(pinst, pcpumask);
        if (!valid) {
                __padata_stop(pinst);
                goto out_replace;
        }

        valid = padata_validate_cpumask(pinst, cbcpumask);
        if (!valid)
                __padata_stop(pinst);

out_replace:
        pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
        if (!pd)
                return -ENOMEM;

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        padata_replace(pinst, pd);

        if (valid)
                __padata_start(pinst);

        return 0;
}

/**
 * padata_set_cpumask: Sets the cpumask selected by @cpumask_type to the
 *                     value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
                       cpumask_var_t cpumask)
{
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;

        mutex_lock(&pinst->lock);
        get_online_cpus();

        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
                serial_mask = pinst->cpumask.cbcpu;
                parallel_mask = cpumask;
                break;
        case PADATA_CPU_SERIAL:
                parallel_mask = pinst->cpumask.pcpu;
                serial_mask = cpumask;
                break;
        default:
                goto out;
        }

        err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
        put_online_cpus();
        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
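
/*
 * Example of restricting the parallel workers to cpus 0-3 (illustrative,
 * with error handling shortened). padata_set_cpumask() copies the mask, so
 * the caller may free it afterwards.
 *
 *      cpumask_var_t mask;
 *
 *      if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *              return -ENOMEM;
 *      cpumask_clear(mask);
 *      cpumask_set_cpu(0, mask);
 *      cpumask_set_cpu(1, mask);
 *      cpumask_set_cpu(2, mask);
 *      cpumask_set_cpu(3, mask);
 *      err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *      free_cpumask_var(mask);
 */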

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
        int err = 0;

        mutex_lock(&pinst->lock);

        if (pinst->flags & PADATA_INVALID)
                err = -EINVAL;

        __padata_start(pinst);

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
        mutex_lock(&pinst->lock);
        __padata_stop(pinst);
        mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
        struct parallel_data *pd;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
                        return -ENOMEM;

                padata_replace(pinst, pd);

                if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
                    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_start(pinst);
        }

        return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
        struct parallel_data *pd = NULL;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {

                if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
                    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_stop(pinst);

                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
                        return -ENOMEM;

                padata_replace(pinst, pd);

                cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
                cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
        }

        return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
        int err;

        if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
                return -EINVAL;

        mutex_lock(&pinst->lock);

        get_online_cpus();
        if (mask & PADATA_CPU_SERIAL)
                cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
        if (mask & PADATA_CPU_PARALLEL)
                cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

        err = __padata_remove_cpu(pinst, cpu);
        put_online_cpus();

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
        return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
                cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_add_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_remove_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

        padata_stop(pinst);
        padata_free_pd(pinst->pd);
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
        kfree(pinst);
}

#define kobj2pinst(_kobj)                                       \
        container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)                                      \
        container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
        struct padata_instance *pinst = kobj2pinst(kobj);
        __padata_free(pinst);
}

struct padata_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
        ssize_t (*store)(struct padata_instance *, struct attribute *,
                         const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
                            struct attribute *attr, char *buf)
{
        struct cpumask *cpumask;
        ssize_t len;

        mutex_lock(&pinst->lock);
        if (!strcmp(attr->name, "serial_cpumask"))
                cpumask = pinst->cpumask.cbcpu;
        else
                cpumask = pinst->cpumask.pcpu;

        len = snprintf(buf, PAGE_SIZE, "%*pb\n",
                       nr_cpu_ids, cpumask_bits(cpumask));
        mutex_unlock(&pinst->lock);
        return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
                             struct attribute *attr,
                             const char *buf, size_t count)
{
        cpumask_var_t new_cpumask;
        ssize_t ret;
        int mask_type;

        if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
                return -ENOMEM;

        ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
                           nr_cpumask_bits);
        if (ret < 0)
                goto out;

        mask_type = !strcmp(attr->name, "serial_cpumask") ?
                PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
        ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
        if (!ret)
                ret = count;

out:
        free_cpumask_var(new_cpumask);
        return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)         \
        static struct padata_sysfs_entry _name##_attr =        \
                __ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)                      \
        static struct padata_sysfs_entry _name##_attr =        \
                __ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
        &serial_cpumask_attr.attr,
        &parallel_cpumask_attr.attr,
        NULL,
};

static ssize_t padata_sysfs_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->show)
                ret = pentry->show(pinst, attr, buf);

        return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->store)
                ret = pentry->store(pinst, attr, buf, count);

        return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
        .show = padata_sysfs_show,
        .store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
        .sysfs_ops = &padata_sysfs_ops,
        .default_attrs = padata_default_attrs,
        .release = padata_sysfs_release,
};
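
/*
 * The two attributes appear under the instance's kobject once the user of
 * padata adds it to sysfs; the exact path depends on that user. With
 * pcrypt, for instance, they show up along the lines of:
 *
 *      # cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *      # echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * The store side accepts the usual hex bitmap format of bitmap_parse().
 */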

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 *
 * Must be called from a cpus_read_lock() protected region
 */
static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
                                            const struct cpumask *pcpumask,
                                            const struct cpumask *cbcpumask)
{
        struct padata_instance *pinst;
        struct parallel_data *pd = NULL;

        pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
        if (!pinst)
                goto err;

        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
                goto err_free_inst;
        if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pinst->cpumask.pcpu);
                goto err_free_inst;
        }
        if (!padata_validate_cpumask(pinst, pcpumask) ||
            !padata_validate_cpumask(pinst, cbcpumask))
                goto err_free_masks;

        pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
        if (!pd)
                goto err_free_masks;

        rcu_assign_pointer(pinst->pd, pd);

        pinst->wq = wq;

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        pinst->flags = 0;

        BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
        kobject_init(&pinst->kobj, &padata_attr_type);
        mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
#endif
        return pinst;

err_free_masks:
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
        kfree(pinst);
err:
        return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 *
 * Must be called from a cpus_read_lock() protected region
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
        lockdep_assert_cpus_held();
        return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
        kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
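
/*
 * Instance lifecycle sketch (illustrative; "my_wq" and the workqueue flags
 * are the caller's choice): allocation must happen inside a
 * cpus_read_lock() section, processing is gated by padata_start() and
 * padata_stop(), and padata_free() drops the final kobject reference,
 * which triggers __padata_free() via padata_sysfs_release().
 *
 *      struct workqueue_struct *my_wq;
 *      struct padata_instance *pinst;
 *
 *      my_wq = alloc_workqueue("my_wq",
 *                              WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 *      cpus_read_lock();
 *      pinst = padata_alloc_possible(my_wq);
 *      cpus_read_unlock();
 *      err = padata_start(pinst);
 *      // ... submit objects with padata_do_parallel() ...
 *      padata_stop(pinst);
 *      padata_free(pinst);
 */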

#ifdef CONFIG_HOTPLUG_CPU
static __init int padata_driver_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
                                      padata_cpu_online,
                                      padata_cpu_prep_down);
        if (ret < 0)
                return ret;

        hp_online = ret;
        return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
        cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif