Commit e7cd2514ea506f06bd4f7b13a9b62afd60f9c73b

Authored by Herbert Xu
1 parent 4726204200

[CRYPTO] chainiv: Avoid lock spinning where possible

This patch makes chainiv avoid spinning by postponing requests on lock
contention if the user allows the use of asynchronous algorithms.  If
a synchronous algorithm is requested then we behave as before.

This should improve IPsec performance on SMP when two CPUs attempt to
transmit over the same SA.  Currently one of them will spin doing nothing
waiting for the other CPU to finish its encryption.  This patch makes it
postpone the request and get on with other work.

If only one CPU is transmitting for a given SA, then we will process
the request synchronously as before.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Showing 2 changed files with 213 additions and 8 deletions (side-by-side diff)

... ... @@ -16,16 +16,34 @@
16 16 #include <crypto/internal/skcipher.h>
17 17 #include <linux/err.h>
18 18 #include <linux/init.h>
  19 +#include <linux/kernel.h>
19 20 #include <linux/module.h>
20 21 #include <linux/random.h>
21 22 #include <linux/spinlock.h>
22 23 #include <linux/string.h>
  24 +#include <linux/workqueue.h>
23 25  
  26 +enum {
  27 + CHAINIV_STATE_INUSE = 0,
  28 +};
  29 +
24 30 struct chainiv_ctx {
25 31 spinlock_t lock;
26 32 char iv[];
27 33 };
28 34  
  35 +struct async_chainiv_ctx {
  36 + unsigned long state;
  37 +
  38 + spinlock_t lock;
  39 + int err;
  40 +
  41 + struct crypto_queue queue;
  42 + struct work_struct postponed;
  43 +
  44 + char iv[];
  45 +};
  46 +
29 47 static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
30 48 {
31 49 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
32 50  
33 51  
34 52  
35 53  
36 54  
37 55  
38 56  
... ... @@ -80,26 +98,187 @@
80 98 return chainiv_givencrypt(req);
81 99 }
82 100  
  101 +static int chainiv_init_common(struct crypto_tfm *tfm)
  102 +{
  103 + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
  104 +
  105 + return skcipher_geniv_init(tfm);
  106 +}
  107 +
83 108 static int chainiv_init(struct crypto_tfm *tfm)
84 109 {
85   - struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
86   - struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
  110 + struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
87 111  
88 112 spin_lock_init(&ctx->lock);
89 113  
90   - tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
  114 + return chainiv_init_common(tfm);
  115 +}
91 116  
92   - return skcipher_geniv_init(tfm);
  117 +static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
  118 +{
  119 + int queued;
  120 +
  121 + if (!ctx->queue.qlen) {
  122 + smp_mb__before_clear_bit();
  123 + clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
  124 +
  125 + if (!ctx->queue.qlen ||
  126 + test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
  127 + goto out;
  128 + }
  129 +
  130 + queued = schedule_work(&ctx->postponed);
  131 + BUG_ON(!queued);
  132 +
  133 +out:
  134 + return ctx->err;
93 135 }
94 136  
  137 +static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
  138 +{
  139 + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
  140 + struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
  141 + int err;
  142 +
  143 + spin_lock_bh(&ctx->lock);
  144 + err = skcipher_enqueue_givcrypt(&ctx->queue, req);
  145 + spin_unlock_bh(&ctx->lock);
  146 +
  147 + if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
  148 + return err;
  149 +
  150 + ctx->err = err;
  151 + return async_chainiv_schedule_work(ctx);
  152 +}
  153 +
  154 +static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
  155 +{
  156 + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
  157 + struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
  158 + struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
  159 + unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
  160 +
  161 + memcpy(req->giv, ctx->iv, ivsize);
  162 + memcpy(subreq->info, ctx->iv, ivsize);
  163 +
  164 + ctx->err = crypto_ablkcipher_encrypt(subreq);
  165 + if (ctx->err)
  166 + goto out;
  167 +
  168 + memcpy(ctx->iv, subreq->info, ivsize);
  169 +
  170 +out:
  171 + return async_chainiv_schedule_work(ctx);
  172 +}
  173 +
  174 +static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
  175 +{
  176 + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
  177 + struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
  178 + struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
  179 +
  180 + ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
  181 + ablkcipher_request_set_callback(subreq, req->creq.base.flags,
  182 + req->creq.base.complete,
  183 + req->creq.base.data);
  184 + ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
  185 + req->creq.nbytes, req->creq.info);
  186 +
  187 + if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
  188 + goto postpone;
  189 +
  190 + if (ctx->queue.qlen) {
  191 + clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
  192 + goto postpone;
  193 + }
  194 +
  195 + return async_chainiv_givencrypt_tail(req);
  196 +
  197 +postpone:
  198 + return async_chainiv_postpone_request(req);
  199 +}
  200 +
  201 +static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
  202 +{
  203 + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
  204 + struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
  205 +
  206 + if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
  207 + goto out;
  208 +
  209 + if (crypto_ablkcipher_crt(geniv)->givencrypt !=
  210 + async_chainiv_givencrypt_first)
  211 + goto unlock;
  212 +
  213 + crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
  214 + get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
  215 +
  216 +unlock:
  217 + clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
  218 +
  219 +out:
  220 + return async_chainiv_givencrypt(req);
  221 +}
  222 +
  223 +static void async_chainiv_do_postponed(struct work_struct *work)
  224 +{
  225 + struct async_chainiv_ctx *ctx = container_of(work,
  226 + struct async_chainiv_ctx,
  227 + postponed);
  228 + struct skcipher_givcrypt_request *req;
  229 + struct ablkcipher_request *subreq;
  230 +
  231 + /* Only handle one request at a time to avoid hogging keventd. */
  232 + spin_lock_bh(&ctx->lock);
  233 + req = skcipher_dequeue_givcrypt(&ctx->queue);
  234 + spin_unlock_bh(&ctx->lock);
  235 +
  236 + if (!req) {
  237 + async_chainiv_schedule_work(ctx);
  238 + return;
  239 + }
  240 +
  241 + subreq = skcipher_givcrypt_reqctx(req);
  242 + subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
  243 +
  244 + async_chainiv_givencrypt_tail(req);
  245 +}
  246 +
  247 +static int async_chainiv_init(struct crypto_tfm *tfm)
  248 +{
  249 + struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
  250 +
  251 + spin_lock_init(&ctx->lock);
  252 +
  253 + crypto_init_queue(&ctx->queue, 100);
  254 + INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
  255 +
  256 + return chainiv_init_common(tfm);
  257 +}
  258 +
  259 +static void async_chainiv_exit(struct crypto_tfm *tfm)
  260 +{
  261 + struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
  262 +
  263 + BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
  264 +
  265 + skcipher_geniv_exit(tfm);
  266 +}
  267 +
95 268 static struct crypto_template chainiv_tmpl;
96 269  
97 270 static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
98 271 {
  272 + struct crypto_attr_type *algt;
99 273 struct crypto_instance *inst;
  274 + int err;
100 275  
101   - inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0,
102   - CRYPTO_ALG_ASYNC);
  276 + algt = crypto_get_attr_type(tb);
  277 + err = PTR_ERR(algt);
  278 + if (IS_ERR(algt))
  279 + return ERR_PTR(err);
  280 +
  281 + inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
103 282 if (IS_ERR(inst))
104 283 goto out;
105 284  
... ... @@ -108,8 +287,21 @@
108 287 inst->alg.cra_init = chainiv_init;
109 288 inst->alg.cra_exit = skcipher_geniv_exit;
110 289  
111   - inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx) +
112   - inst->alg.cra_ablkcipher.ivsize;
  290 + inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
  291 +
  292 + if (!crypto_requires_sync(algt->type, algt->mask)) {
  293 + inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
  294 +
  295 + inst->alg.cra_ablkcipher.givencrypt =
  296 + async_chainiv_givencrypt_first;
  297 +
  298 + inst->alg.cra_init = async_chainiv_init;
  299 + inst->alg.cra_exit = async_chainiv_exit;
  300 +
  301 + inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
  302 + }
  303 +
  304 + inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
113 305  
114 306 out:
115 307 return inst;
include/crypto/internal/skcipher.h
... ... @@ -70,6 +70,19 @@
70 70 return crypto_ablkcipher_crt(geniv)->base;
71 71 }
72 72  
  73 +static inline int skcipher_enqueue_givcrypt(
  74 + struct crypto_queue *queue, struct skcipher_givcrypt_request *request)
  75 +{
  76 + return ablkcipher_enqueue_request(queue, &request->creq);
  77 +}
  78 +
  79 +static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
  80 + struct crypto_queue *queue)
  81 +{
  82 + return container_of(ablkcipher_dequeue_request(queue),
  83 + struct skcipher_givcrypt_request, creq);
  84 +}
  85 +
73 86 static inline void *skcipher_givcrypt_reqctx(
74 87 struct skcipher_givcrypt_request *req)
75 88 {