Commit d3f64e46aa21dd86a239274d218ec286461bfa68
Committed by: Herbert Xu
1 parent: c57e842eff
Exists in: master and 7 other branches
crypto: pcrypt - Update pcrypt cpumask according to the padata cpumask notifier
The padata cpumask change notifier passes a padata_cpumask to the
notifier chain. So we use this cpumask instead of asking padata for
the cpumask.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
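For context, a minimal sketch of the notifier contract this commit relies on
(this is not part of the commit; the function name example_cpumask_notify is
hypothetical, and the padata_cpumask fields pcpu/cbcpu are per the padata API
of this era): padata invokes the notifier chain with a struct padata_cpumask *
as the opaque data argument, so a callee can read the new masks directly
instead of calling back into padata.

	#include <linux/cpumask.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <linux/padata.h>

	/* Hypothetical callback; padata passes the new masks via 'data'. */
	static int example_cpumask_notify(struct notifier_block *self,
					  unsigned long val, void *data)
	{
		struct padata_cpumask *pmask = data;

		/* 'val' flags which mask changed: PADATA_CPU_PARALLEL
		 * and/or PADATA_CPU_SERIAL. */
		if (!(val & PADATA_CPU_SERIAL))
			return 0;

		/* pmask->cbcpu is the new serial (callback) cpumask,
		 * pmask->pcpu the parallel one; no extra padata call
		 * is needed to obtain them. */
		pr_info("serial cpumask now spans %u CPUs\n",
			cpumask_weight(pmask->cbcpu));

		return 0;
	}

This is exactly the shape pcrypt_cpumask_change_notify takes in the diff below.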
Showing 1 changed file with 11 additions and 2 deletions.
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -1,559 +1,568 @@
 /*
  * pcrypt - Parallel crypto wrapper.
  *
  * Copyright (C) 2009 secunet Security Networks AG
  * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
  * this program; if not, write to the Free Software Foundation, Inc.,
  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <crypto/algapi.h>
 #include <crypto/internal/aead.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/notifier.h>
 #include <linux/kobject.h>
+#include <linux/cpu.h>
 #include <crypto/pcrypt.h>
 
 struct padata_pcrypt {
 	struct padata_instance *pinst;
 	struct workqueue_struct *wq;
 
 	/*
 	 * Cpumask for callback CPUs. It should be
 	 * equal to serial cpumask of corresponding padata instance,
 	 * so it is updated when padata notifies us about serial
 	 * cpumask change.
 	 *
 	 * cb_cpumask is protected by RCU. This fact prevents us from
 	 * using cpumask_var_t directly because the actual type of
 	 * cpumsak_var_t depends on kernel configuration(particularly on
 	 * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
 	 * cpumask_var_t may be either a pointer to the struct cpumask
 	 * or a variable allocated on the stack. Thus we can not safely use
 	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
 	 * rcu_dereference. So cpumask_var_t is wrapped with struct
 	 * pcrypt_cpumask which makes possible to use it with RCU.
 	 */
 	struct pcrypt_cpumask {
 		cpumask_var_t mask;
 	} *cb_cpumask;
 	struct notifier_block nblock;
 };
 
 static struct padata_pcrypt pencrypt;
 static struct padata_pcrypt pdecrypt;
 static struct kset *pcrypt_kset;
 
 struct pcrypt_instance_ctx {
 	struct crypto_spawn spawn;
 	unsigned int tfm_count;
 };
 
 struct pcrypt_aead_ctx {
 	struct crypto_aead *child;
 	unsigned int cb_cpu;
 };
 
 static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
 			      struct padata_pcrypt *pcrypt)
 {
 	unsigned int cpu_index, cpu, i;
 	struct pcrypt_cpumask *cpumask;
 
 	cpu = *cb_cpu;
 
 	rcu_read_lock_bh();
 	cpumask = rcu_dereference(pcrypt->cb_cpumask);
 	if (cpumask_test_cpu(cpu, cpumask->mask))
 		goto out;
 
 	if (!cpumask_weight(cpumask->mask))
 		goto out;
 
 	cpu_index = cpu % cpumask_weight(cpumask->mask);
 
 	cpu = cpumask_first(cpumask->mask);
 	for (i = 0; i < cpu_index; i++)
 		cpu = cpumask_next(cpu, cpumask->mask);
 
 	*cb_cpu = cpu;
 
 out:
 	rcu_read_unlock_bh();
 	return padata_do_parallel(pcrypt->pinst, padata, cpu);
 }
 
 static int pcrypt_aead_setkey(struct crypto_aead *parent,
 			      const u8 *key, unsigned int keylen)
 {
 	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
 
 	return crypto_aead_setkey(ctx->child, key, keylen);
 }
 
 static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
 				   unsigned int authsize)
 {
 	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
 
 	return crypto_aead_setauthsize(ctx->child, authsize);
 }
 
 static void pcrypt_aead_serial(struct padata_priv *padata)
 {
 	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 	struct aead_request *req = pcrypt_request_ctx(preq);
 
 	aead_request_complete(req->base.data, padata->info);
 }
 
 static void pcrypt_aead_giv_serial(struct padata_priv *padata)
 {
 	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
 
 	aead_request_complete(req->areq.base.data, padata->info);
 }
 
 static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct pcrypt_request *preq = aead_request_ctx(req);
 	struct padata_priv *padata = pcrypt_request_padata(preq);
 
 	padata->info = err;
 	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	padata_do_serial(padata);
 }
 
 static void pcrypt_aead_enc(struct padata_priv *padata)
 {
 	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 	struct aead_request *req = pcrypt_request_ctx(preq);
 
 	padata->info = crypto_aead_encrypt(req);
 
 	if (padata->info == -EINPROGRESS)
 		return;
 
 	padata_do_serial(padata);
 }
 
 static int pcrypt_aead_encrypt(struct aead_request *req)
 {
 	int err;
 	struct pcrypt_request *preq = aead_request_ctx(req);
 	struct aead_request *creq = pcrypt_request_ctx(preq);
 	struct padata_priv *padata = pcrypt_request_padata(preq);
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
 	u32 flags = aead_request_flags(req);
 
 	memset(padata, 0, sizeof(struct padata_priv));
 
 	padata->parallel = pcrypt_aead_enc;
 	padata->serial = pcrypt_aead_serial;
 
 	aead_request_set_tfm(creq, ctx->child);
 	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 				  pcrypt_aead_done, req);
 	aead_request_set_crypt(creq, req->src, req->dst,
 			       req->cryptlen, req->iv);
 	aead_request_set_assoc(creq, req->assoc, req->assoclen);
 
 	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
 	if (!err)
 		return -EINPROGRESS;
 
 	return err;
 }
 
 static void pcrypt_aead_dec(struct padata_priv *padata)
 {
 	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 	struct aead_request *req = pcrypt_request_ctx(preq);
 
 	padata->info = crypto_aead_decrypt(req);
 
 	if (padata->info == -EINPROGRESS)
 		return;
 
 	padata_do_serial(padata);
 }
 
 static int pcrypt_aead_decrypt(struct aead_request *req)
 {
 	int err;
 	struct pcrypt_request *preq = aead_request_ctx(req);
 	struct aead_request *creq = pcrypt_request_ctx(preq);
 	struct padata_priv *padata = pcrypt_request_padata(preq);
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
 	u32 flags = aead_request_flags(req);
 
 	memset(padata, 0, sizeof(struct padata_priv));
 
 	padata->parallel = pcrypt_aead_dec;
 	padata->serial = pcrypt_aead_serial;
 
 	aead_request_set_tfm(creq, ctx->child);
 	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 				  pcrypt_aead_done, req);
 	aead_request_set_crypt(creq, req->src, req->dst,
 			       req->cryptlen, req->iv);
 	aead_request_set_assoc(creq, req->assoc, req->assoclen);
 
 	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
 	if (!err)
 		return -EINPROGRESS;
 
 	return err;
 }
 
 static void pcrypt_aead_givenc(struct padata_priv *padata)
 {
 	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
 
 	padata->info = crypto_aead_givencrypt(req);
 
 	if (padata->info == -EINPROGRESS)
 		return;
 
 	padata_do_serial(padata);
 }
 
 static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
 {
 	int err;
 	struct aead_request *areq = &req->areq;
 	struct pcrypt_request *preq = aead_request_ctx(areq);
 	struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
 	struct padata_priv *padata = pcrypt_request_padata(preq);
 	struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
 	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
 	u32 flags = aead_request_flags(areq);
 
 	memset(padata, 0, sizeof(struct padata_priv));
 
 	padata->parallel = pcrypt_aead_givenc;
 	padata->serial = pcrypt_aead_giv_serial;
 
 	aead_givcrypt_set_tfm(creq, ctx->child);
 	aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 				   pcrypt_aead_done, areq);
 	aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
 				areq->cryptlen, areq->iv);
 	aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
 	aead_givcrypt_set_giv(creq, req->giv, req->seq);
 
 	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
 	if (!err)
 		return -EINPROGRESS;
 
 	return err;
 }
 
 static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
 {
 	int cpu, cpu_index;
 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 	struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
 	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct crypto_aead *cipher;
 
 	ictx->tfm_count++;
 
 	cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
 
 	ctx->cb_cpu = cpumask_first(cpu_active_mask);
 	for (cpu = 0; cpu < cpu_index; cpu++)
 		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
 
 	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
 
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
 	ctx->child = cipher;
 	tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
 		+ sizeof(struct aead_givcrypt_request)
 		+ crypto_aead_reqsize(cipher);
 
 	return 0;
 }
 
 static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	crypto_free_aead(ctx->child);
 }
 
 static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
 {
 	struct crypto_instance *inst;
 	struct pcrypt_instance_ctx *ctx;
 	int err;
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 	if (!inst) {
 		inst = ERR_PTR(-ENOMEM);
 		goto out;
 	}
 
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_free_inst;
 
 	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 
 	ctx = crypto_instance_ctx(inst);
 	err = crypto_init_spawn(&ctx->spawn, alg, inst,
 				CRYPTO_ALG_TYPE_MASK);
 	if (err)
 		goto out_free_inst;
 
 	inst->alg.cra_priority = alg->cra_priority + 100;
 	inst->alg.cra_blocksize = alg->cra_blocksize;
 	inst->alg.cra_alignmask = alg->cra_alignmask;
 
 out:
 	return inst;
 
 out_free_inst:
 	kfree(inst);
 	inst = ERR_PTR(err);
 	goto out;
 }
 
 static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
 						 u32 type, u32 mask)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
 
 	alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);
 
 	inst = pcrypt_alloc_instance(alg);
 	if (IS_ERR(inst))
 		goto out_put_alg;
 
 	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
 	inst->alg.cra_type = &crypto_aead_type;
 
 	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
 	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
 	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
 
 	inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
 
 	inst->alg.cra_init = pcrypt_aead_init_tfm;
 	inst->alg.cra_exit = pcrypt_aead_exit_tfm;
 
 	inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
 	inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
 	inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
 	inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
 	inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;
 
 out_put_alg:
 	crypto_mod_put(alg);
 	return inst;
 }
 
 static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
 {
 	struct crypto_attr_type *algt;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
 		return ERR_CAST(algt);
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AEAD:
 		return pcrypt_alloc_aead(tb, algt->type, algt->mask);
 	}
 
 	return ERR_PTR(-EINVAL);
 }
 
 static void pcrypt_free(struct crypto_instance *inst)
 {
 	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
 
 	crypto_drop_spawn(&ctx->spawn);
 	kfree(inst);
 }
 
 static int pcrypt_cpumask_change_notify(struct notifier_block *self,
 					unsigned long val, void *data)
 {
 	struct padata_pcrypt *pcrypt;
 	struct pcrypt_cpumask *new_mask, *old_mask;
+	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
 
 	if (!(val & PADATA_CPU_SERIAL))
 		return 0;
 
 	pcrypt = container_of(self, struct padata_pcrypt, nblock);
 	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
 	if (!new_mask)
 		return -ENOMEM;
 	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
 		kfree(new_mask);
 		return -ENOMEM;
 	}
 
 	old_mask = pcrypt->cb_cpumask;
 
-	padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, new_mask->mask);
+	cpumask_copy(new_mask->mask, cpumask->cbcpu);
 	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
 	synchronize_rcu_bh();
 
 	free_cpumask_var(old_mask->mask);
 	kfree(old_mask);
 	return 0;
 }
 
 static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
 {
 	int ret;
 
 	pinst->kobj.kset = pcrypt_kset;
 	ret = kobject_add(&pinst->kobj, NULL, name);
 	if (!ret)
 		kobject_uevent(&pinst->kobj, KOBJ_ADD);
 
 	return ret;
 }
 
 static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
 			      const char *name)
 {
 	int ret = -ENOMEM;
 	struct pcrypt_cpumask *mask;
 
+	get_online_cpus();
+
 	pcrypt->wq = create_workqueue(name);
 	if (!pcrypt->wq)
 		goto err;
 
 	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
 	if (!pcrypt->pinst)
 		goto err_destroy_workqueue;
 
 	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
 	if (!mask)
 		goto err_free_padata;
 	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
 		kfree(mask);
 		goto err_free_padata;
 	}
 
-	padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, mask->mask);
+	cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
 	rcu_assign_pointer(pcrypt->cb_cpumask, mask);
 
 	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
 	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
 	if (ret)
 		goto err_free_cpumask;
 
 	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
 	if (ret)
 		goto err_unregister_notifier;
 
+	put_online_cpus();
+
 	return ret;
+
 err_unregister_notifier:
 	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
 err_free_cpumask:
 	free_cpumask_var(mask->mask);
 	kfree(mask);
 err_free_padata:
 	padata_free(pcrypt->pinst);
 err_destroy_workqueue:
 	destroy_workqueue(pcrypt->wq);
 err:
+	put_online_cpus();
+
 	return ret;
 }
 
 static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 {
 	kobject_put(&pcrypt->pinst->kobj);
 	free_cpumask_var(pcrypt->cb_cpumask->mask);
 	kfree(pcrypt->cb_cpumask);
 
 	padata_stop(pcrypt->pinst);
 	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
 	destroy_workqueue(pcrypt->wq);
 	padata_free(pcrypt->pinst);
 }
 
 static struct crypto_template pcrypt_tmpl = {
 	.name = "pcrypt",
 	.alloc = pcrypt_alloc,
 	.free = pcrypt_free,
 	.module = THIS_MODULE,
 };
 
 static int __init pcrypt_init(void)
 {
 	int err = -ENOMEM;
 
 	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
 	if (!pcrypt_kset)
 		goto err;
 
 	err = pcrypt_init_padata(&pencrypt, "pencrypt");
 	if (err)
 		goto err_unreg_kset;
 
 	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
 	if (err)
 		goto err_deinit_pencrypt;
 
 	padata_start(pencrypt.pinst);
 	padata_start(pdecrypt.pinst);
 
 	return crypto_register_template(&pcrypt_tmpl);
 
 err_deinit_pencrypt:
 	pcrypt_fini_padata(&pencrypt);
 err_unreg_kset:
 	kset_unregister(pcrypt_kset);
 err:
 	return err;
 }
 
 static void __exit pcrypt_exit(void)
 {
 	pcrypt_fini_padata(&pencrypt);
 	pcrypt_fini_padata(&pdecrypt);
 
 	kset_unregister(pcrypt_kset);
 	crypto_unregister_template(&pcrypt_tmpl);
 }
 
 module_init(pcrypt_init);
 module_exit(pcrypt_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_DESCRIPTION("Parallel crypto wrapper");
 