Blame view
kernel/padata.c
28 KB
08b21fbf4 padata: add SPDX ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
16295bec6 padata: Generic p... |
2 3 4 |
/* * padata.c - generic interface to process data streams in parallel * |
bfcdcef8c padata: update do... |
5 |
* See Documentation/core-api/padata.rst for more information. |
107f8bdac padata: Add a ref... |
6 |
* |
16295bec6 padata: Generic p... |
7 8 9 |
* Copyright (C) 2008, 2009 secunet Security Networks AG * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com> * |
004ed4263 padata: add basic... |
10 11 12 |
* Copyright (c) 2020 Oracle and/or its affiliates. * Author: Daniel Jordan <daniel.m.jordan@oracle.com> * |
16295bec6 padata: Generic p... |
13 14 15 16 17 18 19 20 21 22 23 24 25 |
* This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ |
004ed4263 padata: add basic... |
26 |
#include <linux/completion.h> |
9984de1a5 kernel: Map most ... |
27 |
#include <linux/export.h> |
16295bec6 padata: Generic p... |
28 29 30 31 32 33 |
#include <linux/cpumask.h> #include <linux/err.h> #include <linux/cpu.h> #include <linux/padata.h> #include <linux/mutex.h> #include <linux/sched.h> |
5a0e3ad6a include cleanup: ... |
34 |
#include <linux/slab.h> |
5e017dc3f padata: Added sys... |
35 |
#include <linux/sysfs.h> |
16295bec6 padata: Generic p... |
36 |
#include <linux/rcupdate.h> |
004ed4263 padata: add basic... |
37 |
#define PADATA_WORK_ONSTACK 1 /* Work's memory is on stack */ |
4611ce224 padata: allocate ... |
38 39 40 41 42 43 44 45 46 |
struct padata_work { struct work_struct pw_work; struct list_head pw_list; /* padata_free_works linkage */ void *pw_data; }; static DEFINE_SPINLOCK(padata_works_lock); static struct padata_work *padata_works; static LIST_HEAD(padata_free_works); |
16295bec6 padata: Generic p... |
47 |
|
004ed4263 padata: add basic... |
48 49 50 51 52 53 54 55 |
struct padata_mt_job_state { spinlock_t lock; struct completion completion; struct padata_mt_job *job; int nworks; int nworks_fini; unsigned long chunk_size; }; |
07928d9bf padata: Remove br... |
56 |
static void padata_free_pd(struct parallel_data *pd); |
004ed4263 padata: add basic... |
57 |
static void __init padata_mt_helper(struct work_struct *work); |
07928d9bf padata: Remove br... |
58 |
|
16295bec6 padata: Generic p... |
59 60 61 |
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) { int cpu, target_cpu; |
e15bacbeb padata: Make two ... |
62 |
target_cpu = cpumask_first(pd->cpumask.pcpu); |
16295bec6 padata: Generic p... |
63 |
for (cpu = 0; cpu < cpu_index; cpu++) |
e15bacbeb padata: Make two ... |
64 |
target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); |
16295bec6 padata: Generic p... |
65 66 67 |
return target_cpu; } |
bfde23ce2 padata: unbind pa... |
68 |
static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr) |
16295bec6 padata: Generic p... |
69 |
{ |
16295bec6 padata: Generic p... |
70 71 72 73 |
/* * Hash the sequence numbers to the cpus by taking * seq_nr mod. number of cpus in use. */ |
bfde23ce2 padata: unbind pa... |
74 |
int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); |
16295bec6 padata: Generic p... |
75 76 77 |
return padata_index_to_cpu(pd, cpu_index); } |
4611ce224 padata: allocate ... |
78 |
/*
 * Pop one work item off the global free list, or return NULL when the
 * pool is exhausted.  Caller must hold padata_works_lock.
 */
static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}
16295bec6 padata: Generic p... |
91 |
|
4611ce224 padata: allocate ... |
92 |
/*
 * Initialize a work item to run @work_fn on @data.  PADATA_WORK_ONSTACK
 * marks works whose storage lives on the caller's stack, which needs
 * INIT_WORK_ONSTACK for correct object debugging/lockdep annotation.
 */
static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
			     void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}
16295bec6 padata: Generic p... |
101 |
|
004ed4263 padata: add basic... |
102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 |
/*
 * Grab up to @nworks - 1 free work items, initialize each to run
 * padata_mt_helper() on @data, and chain them onto @head.
 *
 * Return: the number of threads that will work on the job, including
 * the current task (hence the loop starts at 1; fewer than @nworks
 * are returned if the free pool runs dry).
 */
static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock(&padata_works_lock);

	return i;
}
4611ce224 padata: allocate ... |
121 122 123 124 125 |
/* Return a work item to the global free pool.  Caller holds padata_works_lock. */
static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}
16295bec6 padata: Generic p... |
126 |
|
004ed4263 padata: add basic... |
127 128 129 130 131 132 133 134 135 136 137 138 139 140 |
/* Return every work item on @works to the global free pool. */
static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock(&padata_works_lock);
}
4611ce224 padata: allocate ... |
141 142 143 144 145 |
/*
 * Workqueue handler for one parallel job: run the caller's ->parallel()
 * callback with BHs off, then recycle the work item into the free pool.
 */
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}
0198ffd13 padata: Add some ... |
154 |
/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padatashell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	/* pd may be swapped out by padata_replace(); pin it for this call. */
	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err =  -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	/* Reference dropped in padata_serial_worker() once serialized. */
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	/*
	 * padata_works_lock also serializes seq_nr assignment so objects
	 * get strictly increasing sequence numbers in submission order.
	 */
	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	} else {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
0198ffd13 padata: Add some ... |
226 |
/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * @pd: parallel_data whose reorder queues are inspected.
 * @remove_object: if true, dequeue the object and advance pd->processed
 *                 and pd->cpu; if false, only peek.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	/* pd->cpu is the CPU whose queue holds the next in-order object. */
	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		/* The next sequence number hashes to the next pcpu CPU. */
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

/*
 * Drain every object that is ready, in sequence-number order, from the
 * percpu reorder queues onto the per-callback-CPU serial queues and kick
 * the serial workers.
 */
static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue the time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on it's way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	/*
	 * Re-check and, if an in-order object is already waiting, punt the
	 * remaining reorder work to pd->reorder_work so it isn't lost.
	 */
	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}
cf5868c8a padata: ensure th... |
326 327 |
/*
 * Work handler used to re-run the reorder logic from pd->reorder_work
 * when padata_reorder() left objects behind.  BHs are disabled to match
 * the locking context padata_reorder() expects (spin_*_bh on pd->lock).
 */
static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}
e15bacbeb padata: Make two ... |
335 |
/*
 * Workqueue handler that runs the caller's ->serial() callback, with BHs
 * off, for every object queued on this CPU's serial queue.  Drops one
 * pd reference per completed object and frees pd on the last reference.
 */
static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	/* Splice the whole queue away so the lock is held only briefly. */
	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	/* Each serialized object holds one pd ref taken in do_parallel. */
	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}
0198ffd13 padata: Add some ... |
367 |
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	/* The reorder queue is chosen by seq_nr so order is recoverable. */
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &reorder->list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
bbefa1dd6 crypto: pcrypt - ... |
400 |
/*
 * Constrain the instance's parallel workqueue so its unbound workers run
 * only on the CPUs in pinst->cpumask.pcpu.
 *
 * Return: 0 on success or negative error code from the workqueue API.
 */
static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}
004ed4263 padata: add basic... |
416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 |
/*
 * Helper run by each thread of a multithreaded job: repeatedly claim the
 * next chunk of [job->start, job->start + job->size) under ps->lock, run
 * the caller's thread_fn on it unlocked, and signal completion when the
 * last helper finishes.
 */
static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / job->min_chunk, 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job	       = job;
	ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
e15bacbeb padata: Make two ... |
507 508 509 510 511 |
/* Initialize one padata_list: empty list protected by its own lock. */
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}
16295bec6 padata: Generic p... |
512 |
|
e15bacbeb padata: Make two ... |
513 514 515 516 517 |
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	/* One serial queue per callback CPU, each backed by its worker. */
	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}
16295bec6 padata: Generic p... |
526 |
|
f601c725a padata: remove pa... |
527 528 |
/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	/* One reorder list per parallel CPU. */
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}
16295bec6 padata: Generic p... |
538 |
|
e15bacbeb padata: Make two ... |
539 |
/* Allocate and initialize the internal cpumask dependend resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	/* Only online CPUs from the user-supplied masks are usable. */
	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	/* -1 so the first object gets seq_nr 0 (see padata_do_parallel). */
	pd->seq_nr = -1;
	/* Initial reference owned by the instance; dropped on replace. */
	atomic_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

/* Release everything padata_alloc_pd() allocated. */
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}
4c8791702 padata: Check for... |
595 596 597 598 |
/* Mark the instance ready to accept parallel jobs. */
static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}
ee8365551 padata: Block unt... |
599 600 601 602 603 604 605 606 |
/*
 * Stop accepting new parallel jobs and wait for in-flight RCU readers
 * (padata_do_parallel) to notice before returning.
 */
static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}
25985edce Fix common misspe... |
608 |
/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	/* Stash the old pd in ps->opd; the caller frees it after a GP. */
	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}
894c9ef97 padata: validate ... |
622 |
/*
 * Swap in a freshly allocated parallel_data for every shell of @pinst,
 * then drop the old ones after an RCU grace period.  PADATA_RESET makes
 * padata_do_parallel() reject new jobs while the swap is in flight.
 *
 * Return: 0 on success or -ENOMEM if any allocation failed (shells that
 * were already replaced keep their new pd).
 */
static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	/* Walk back over only the shells actually replaced above. */
	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (atomic_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}
33e544506 padata: Handle em... |
645 646 647 648 |
/* If cpumask contains no active cpu, we mark the instance as invalid. */ static bool padata_validate_cpumask(struct padata_instance *pinst, const struct cpumask *cpumask) { |
13614e0fb padata: Use the o... |
649 |
if (!cpumask_intersects(cpumask, cpu_online_mask)) { |
33e544506 padata: Handle em... |
650 651 652 653 654 655 656 |
pinst->flags |= PADATA_INVALID; return false; } pinst->flags &= ~PADATA_INVALID; return true; } |
65ff577e6 padata: Rearrange... |
657 658 659 660 661 |
/*
 * Install both cpumasks on @pinst, stopping the instance first if either
 * mask has no online CPU, then rebuild the workqueue attrs and per-shell
 * parallel_data.  Restarts the instance only when both masks are valid.
 */
static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Sets specified by @cpumask_type cpumask to the value
 *                      equivalent to @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
 *                to parallel and serial cpumasks respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	/* Hold CPUs stable so validation against cpu_online_mask holds. */
	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		 goto out;
	}

	err =  __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
19d795b67 kernel/padata.c: ... |
723 |
#ifdef CONFIG_HOTPLUG_CPU |
16295bec6 padata: Generic p... |
724 725 |
static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { |
bbefa1dd6 crypto: pcrypt - ... |
726 |
int err = 0; |
16295bec6 padata: Generic p... |
727 |
|
13614e0fb padata: Use the o... |
728 |
if (cpumask_test_cpu(cpu, cpu_online_mask)) { |
894c9ef97 padata: validate ... |
729 |
err = padata_replace(pinst); |
33e544506 padata: Handle em... |
730 |
|
e15bacbeb padata: Make two ... |
731 732 |
if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) |
33e544506 padata: Handle em... |
733 |
__padata_start(pinst); |
16295bec6 padata: Generic p... |
734 |
} |
bbefa1dd6 crypto: pcrypt - ... |
735 |
return err; |
16295bec6 padata: Generic p... |
736 |
} |
16295bec6 padata: Generic p... |
737 738 |
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) { |
bbefa1dd6 crypto: pcrypt - ... |
739 |
int err = 0; |
16295bec6 padata: Generic p... |
740 |
|
894c9ef97 padata: validate ... |
741 |
if (!cpumask_test_cpu(cpu, cpu_online_mask)) { |
e15bacbeb padata: Make two ... |
742 |
if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || |
b89661dff padata: Allocate ... |
743 |
!padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) |
33e544506 padata: Handle em... |
744 |
__padata_stop(pinst); |
33e544506 padata: Handle em... |
745 |
|
894c9ef97 padata: validate ... |
746 |
err = padata_replace(pinst); |
16295bec6 padata: Generic p... |
747 |
} |
bbefa1dd6 crypto: pcrypt - ... |
748 |
return err; |
16295bec6 padata: Generic p... |
749 |
} |
e15bacbeb padata: Make two ... |
750 751 752 753 754 |
/* Does @pinst use @cpu in either its parallel or its serial cpumask? */
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	if (cpumask_test_cpu(cpu, pinst->cpumask.pcpu))
		return 1;
	return cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}
30e92153b padata: Convert t... |
755 |
static int padata_cpu_online(unsigned int cpu, struct hlist_node *node) |
16295bec6 padata: Generic p... |
756 |
{ |
16295bec6 padata: Generic p... |
757 |
struct padata_instance *pinst; |
30e92153b padata: Convert t... |
758 |
int ret; |
16295bec6 padata: Generic p... |
759 |
|
3c2214b60 padata: add separ... |
760 |
pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node); |
30e92153b padata: Convert t... |
761 762 |
if (!pinst_has_cpu(pinst, cpu)) return 0; |
16295bec6 padata: Generic p... |
763 |
|
30e92153b padata: Convert t... |
764 765 766 767 768 |
mutex_lock(&pinst->lock); ret = __padata_add_cpu(pinst, cpu); mutex_unlock(&pinst->lock); return ret; } |
16295bec6 padata: Generic p... |
769 |
|
894c9ef97 padata: validate ... |
770 |
static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node) |
30e92153b padata: Convert t... |
771 772 773 |
{ struct padata_instance *pinst; int ret; |
3c2214b60 padata: add separ... |
774 |
pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node); |
30e92153b padata: Convert t... |
775 776 |
if (!pinst_has_cpu(pinst, cpu)) return 0; |
16295bec6 padata: Generic p... |
777 |
|
30e92153b padata: Convert t... |
778 779 780 781 |
mutex_lock(&pinst->lock); ret = __padata_remove_cpu(pinst, cpu); mutex_unlock(&pinst->lock); return ret; |
16295bec6 padata: Generic p... |
782 |
} |
30e92153b padata: Convert t... |
783 784 |
static enum cpuhp_state hp_online; |
e2cb2f1c2 padata: cpu hotpl... |
785 |
#endif |
16295bec6 padata: Generic p... |
786 |
|
5e017dc3f padata: Added sys... |
787 788 789 |
static void __padata_free(struct padata_instance *pinst) { #ifdef CONFIG_HOTPLUG_CPU |
3c2214b60 padata: add separ... |
790 791 792 |
cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->cpu_dead_node); cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node); |
5e017dc3f padata: Added sys... |
793 |
#endif |
bbefa1dd6 crypto: pcrypt - ... |
794 |
WARN_ON(!list_empty(&pinst->pslist)); |
5e017dc3f padata: Added sys... |
795 796 |
free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); |
45d153c08 padata: use separ... |
797 798 |
destroy_workqueue(pinst->serial_wq); destroy_workqueue(pinst->parallel_wq); |
5e017dc3f padata: Added sys... |
799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 |
kfree(pinst); } #define kobj2pinst(_kobj) \ container_of(_kobj, struct padata_instance, kobj) #define attr2pentry(_attr) \ container_of(_attr, struct padata_sysfs_entry, attr) static void padata_sysfs_release(struct kobject *kobj) { struct padata_instance *pinst = kobj2pinst(kobj); __padata_free(pinst); } struct padata_sysfs_entry { struct attribute attr; ssize_t (*show)(struct padata_instance *, struct attribute *, char *); ssize_t (*store)(struct padata_instance *, struct attribute *, const char *, size_t); }; static ssize_t show_cpumask(struct padata_instance *pinst, struct attribute *attr, char *buf) { struct cpumask *cpumask; ssize_t len; mutex_lock(&pinst->lock); if (!strcmp(attr->name, "serial_cpumask")) cpumask = pinst->cpumask.cbcpu; else cpumask = pinst->cpumask.pcpu; |
4497da6f9 padata: use %*pb[... |
831 832 833 |
len = snprintf(buf, PAGE_SIZE, "%*pb ", nr_cpu_ids, cpumask_bits(cpumask)); |
5e017dc3f padata: Added sys... |
834 |
mutex_unlock(&pinst->lock); |
4497da6f9 padata: use %*pb[... |
835 |
return len < PAGE_SIZE ? len : -EINVAL; |
5e017dc3f padata: Added sys... |
836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 |
} static ssize_t store_cpumask(struct padata_instance *pinst, struct attribute *attr, const char *buf, size_t count) { cpumask_var_t new_cpumask; ssize_t ret; int mask_type; if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL)) return -ENOMEM; ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask), nr_cpumask_bits); if (ret < 0) goto out; mask_type = !strcmp(attr->name, "serial_cpumask") ? PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL; ret = padata_set_cpumask(pinst, mask_type, new_cpumask); if (!ret) ret = count; out: free_cpumask_var(new_cpumask); return ret; } #define PADATA_ATTR_RW(_name, _show_name, _store_name) \ static struct padata_sysfs_entry _name##_attr = \ __ATTR(_name, 0644, _show_name, _store_name) #define PADATA_ATTR_RO(_name, _show_name) \ static struct padata_sysfs_entry _name##_attr = \ __ATTR(_name, 0400, _show_name, NULL) PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask); PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask); /* * Padata sysfs provides the following objects: * serial_cpumask [RW] - cpumask for serial workers * parallel_cpumask [RW] - cpumask for parallel workers */ static struct attribute *padata_default_attrs[] = { &serial_cpumask_attr.attr, ¶llel_cpumask_attr.attr, NULL, }; |
2064fbc77 padata: Replace p... |
885 |
ATTRIBUTE_GROUPS(padata_default); |
5e017dc3f padata: Added sys... |
886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 |
static ssize_t padata_sysfs_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct padata_instance *pinst; struct padata_sysfs_entry *pentry; ssize_t ret = -EIO; pinst = kobj2pinst(kobj); pentry = attr2pentry(attr); if (pentry->show) ret = pentry->show(pinst, attr, buf); return ret; } static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct padata_instance *pinst; struct padata_sysfs_entry *pentry; ssize_t ret = -EIO; pinst = kobj2pinst(kobj); pentry = attr2pentry(attr); if (pentry->show) ret = pentry->store(pinst, attr, buf, count); return ret; } static const struct sysfs_ops padata_sysfs_ops = { .show = padata_sysfs_show, .store = padata_sysfs_store, }; static struct kobj_type padata_attr_type = { .sysfs_ops = &padata_sysfs_ops, |
2064fbc77 padata: Replace p... |
924 |
.default_groups = padata_default_groups, |
5e017dc3f padata: Added sys... |
925 926 |
.release = padata_sysfs_release, }; |
0198ffd13 padata: Add some ... |
927 |
/** |
3f257191d padata: fold pada... |
928 |
* padata_alloc - allocate and initialize a padata instance |
b128a3040 padata: allocate ... |
929 |
* @name: used to identify the instance |
bfcdcef8c padata: update do... |
930 931 |
* * Return: new instance on success, NULL on error |
16295bec6 padata: Generic p... |
932 |
*/ |
3f257191d padata: fold pada... |
933 |
struct padata_instance *padata_alloc(const char *name) |
16295bec6 padata: Generic p... |
934 |
{ |
16295bec6 padata: Generic p... |
935 |
struct padata_instance *pinst; |
16295bec6 padata: Generic p... |
936 937 938 939 |
pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); if (!pinst) goto err; |
bfde23ce2 padata: unbind pa... |
940 941 |
pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0, name); |
45d153c08 padata: use separ... |
942 |
if (!pinst->parallel_wq) |
16295bec6 padata: Generic p... |
943 |
goto err_free_inst; |
b128a3040 padata: allocate ... |
944 |
|
cc491d8e6 padata, pcrypt: t... |
945 |
get_online_cpus(); |
45d153c08 padata: use separ... |
946 947 948 |
pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name); if (!pinst->serial_wq) |
cc491d8e6 padata, pcrypt: t... |
949 |
goto err_put_cpus; |
45d153c08 padata: use separ... |
950 951 952 |
if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) goto err_free_serial_wq; |
e15bacbeb padata: Make two ... |
953 954 |
if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { free_cpumask_var(pinst->cpumask.pcpu); |
45d153c08 padata: use separ... |
955 |
goto err_free_serial_wq; |
33e544506 padata: Handle em... |
956 |
} |
16295bec6 padata: Generic p... |
957 |
|
bbefa1dd6 crypto: pcrypt - ... |
958 |
INIT_LIST_HEAD(&pinst->pslist); |
16295bec6 padata: Generic p... |
959 |
|
3f257191d padata: fold pada... |
960 961 |
cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask); cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask); |
bbefa1dd6 crypto: pcrypt - ... |
962 963 |
if (padata_setup_cpumasks(pinst)) |
d69e037bc padata: remove ef... |
964 |
goto err_free_masks; |
16295bec6 padata: Generic p... |
965 |
|
bd25b4886 padata: remove st... |
966 |
__padata_start(pinst); |
16295bec6 padata: Generic p... |
967 |
|
5e017dc3f padata: Added sys... |
968 |
kobject_init(&pinst->kobj, &padata_attr_type); |
16295bec6 padata: Generic p... |
969 |
mutex_init(&pinst->lock); |
b8b4a4166 padata - Register... |
970 |
#ifdef CONFIG_HOTPLUG_CPU |
3c2214b60 padata: add separ... |
971 972 |
cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->cpu_online_node); |
894c9ef97 padata: validate ... |
973 |
cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD, |
3c2214b60 padata: add separ... |
974 |
&pinst->cpu_dead_node); |
b8b4a4166 padata - Register... |
975 |
#endif |
cc491d8e6 padata, pcrypt: t... |
976 977 |
put_online_cpus(); |
16295bec6 padata: Generic p... |
978 |
return pinst; |
e15bacbeb padata: Make two ... |
979 980 981 |
err_free_masks: free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); |
45d153c08 padata: use separ... |
982 983 |
err_free_serial_wq: destroy_workqueue(pinst->serial_wq); |
cc491d8e6 padata, pcrypt: t... |
984 985 |
err_put_cpus: put_online_cpus(); |
45d153c08 padata: use separ... |
986 |
destroy_workqueue(pinst->parallel_wq); |
16295bec6 padata: Generic p... |
987 988 989 990 991 |
err_free_inst: kfree(pinst); err: return NULL; } |
3f257191d padata: fold pada... |
992 |
EXPORT_SYMBOL(padata_alloc); |
9596695ee padata: Make pada... |
993 994 |
/** |
16295bec6 padata: Generic p... |
995 996 |
* padata_free - free a padata instance * |
bfcdcef8c padata: update do... |
997 |
* @pinst: padata instance to free |
16295bec6 padata: Generic p... |
998 999 1000 |
*/ void padata_free(struct padata_instance *pinst) { |
5e017dc3f padata: Added sys... |
1001 |
kobject_put(&pinst->kobj); |
16295bec6 padata: Generic p... |
1002 1003 |
} EXPORT_SYMBOL(padata_free); |
30e92153b padata: Convert t... |
1004 |
|
bbefa1dd6 crypto: pcrypt - ... |
1005 1006 1007 1008 |
/** * padata_alloc_shell - Allocate and initialize padata shell. * * @pinst: Parent padata_instance object. |
bfcdcef8c padata: update do... |
1009 1010 |
* * Return: new shell on success, NULL on error |
bbefa1dd6 crypto: pcrypt - ... |
1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 |
*/ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst) { struct parallel_data *pd; struct padata_shell *ps; ps = kzalloc(sizeof(*ps), GFP_KERNEL); if (!ps) goto out; ps->pinst = pinst; get_online_cpus(); pd = padata_alloc_pd(ps); put_online_cpus(); if (!pd) goto out_free_ps; mutex_lock(&pinst->lock); RCU_INIT_POINTER(ps->pd, pd); list_add(&ps->list, &pinst->pslist); mutex_unlock(&pinst->lock); return ps; out_free_ps: kfree(ps); out: return NULL; } EXPORT_SYMBOL(padata_alloc_shell); /** * padata_free_shell - free a padata shell * * @ps: padata shell to free */ void padata_free_shell(struct padata_shell *ps) { |
07b24c7c0 crypto: pcrypt - ... |
1051 1052 |
if (!ps) return; |
bbefa1dd6 crypto: pcrypt - ... |
1053 |
|
07b24c7c0 crypto: pcrypt - ... |
1054 |
mutex_lock(&ps->pinst->lock); |
bbefa1dd6 crypto: pcrypt - ... |
1055 1056 |
list_del(&ps->list); padata_free_pd(rcu_dereference_protected(ps->pd, 1)); |
07b24c7c0 crypto: pcrypt - ... |
1057 |
mutex_unlock(&ps->pinst->lock); |
bbefa1dd6 crypto: pcrypt - ... |
1058 1059 1060 1061 |
kfree(ps); } EXPORT_SYMBOL(padata_free_shell); |
f1b192b11 padata: initializ... |
1062 |
/*
 * Boot-time setup: register the CPU hotplug callbacks and preallocate one
 * padata_work per possible CPU for padata_do_multithreaded().
 */
void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	/* Kernel log lines are newline-terminated. */
	pr_warn("padata: initialization failed\n");
}