Commit 5cde0af2a9825dd1edaca233bd9590566579ef21

Authored by Herbert Xu
Parent: 5c64097aa0

[CRYPTO] cipher: Added block cipher type

This patch adds a new block cipher type.  Unlike the current cipher
algorithms, which operate on a single block at a time, block ciphers
operate on an arbitrarily long linear area of data.  As the type is
block-based, any trailing data that cannot form a complete block is
skipped.

The new type differs from the existing cipher implementation in one
major way: the sg walking is now performed by the algorithm rather
than by the cipher mid-layer.  This is needed for drivers that support
sg lists directly.  It also improves performance for all algorithms,
as it reduces the total number of indirect calls by one.

In future, the existing cipher algorithm will be converted to a
single-block-only interface.  This will be done once all existing
users have switched over to the new block cipher type.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
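
For orientation: with this type, the walker loop lives in the algorithm
itself.  Below is a minimal sketch of that algorithm-side loop, assuming
a hypothetical single-block helper example_block(); only the
blkcipher_walk_* calls and walk fields come from this patch:

static int example_crypt(struct blkcipher_desc *desc,
			 struct scatterlist *dst, struct scatterlist *src,
			 unsigned int nbytes)
{
	const unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		u8 *wsrc = walk.src.virt.addr;
		u8 *wdst = walk.dst.virt.addr;

		/* Process each complete block mapped in this chunk. */
		do {
			example_block(desc->tfm, wdst, wsrc);	/* hypothetical */
			wsrc += bsize;
			wdst += bsize;
		} while ((nbytes -= bsize) >= bsize);

		/* Hand back the unprocessed remainder; the walker advances
		   both scatterlists and maps the next chunk, or cleans up. */
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

blkcipher_walk_virt() maps the current scatterlist chunk at
walk.src.virt.addr/walk.dst.virt.addr; blkcipher_walk_done() takes the
number of bytes left unprocessed and either advances to the next chunk
or finishes the walk.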

Showing 5 changed files with 655 additions and 0 deletions

crypto/Kconfig
@@ -16,6 +16,10 @@
 	help
 	  This option provides the API for cryptographic algorithms.
 
+config CRYPTO_BLKCIPHER
+	tristate
+	select CRYPTO_ALGAPI
+
 config CRYPTO_MANAGER
 	tristate "Cryptographic algorithm manager"
 	select CRYPTO_ALGAPI

crypto/Makefile
@@ -8,6 +8,8 @@
 crypto_algapi-objs := algapi.o $(crypto_algapi-y)
 obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
 
+obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o
+
 obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o

crypto/blkcipher.c (new file)
@@ -0,0 +1,405 @@
+/*
+ * Block chaining cipher operations.
+ *
+ * Generic encrypt/decrypt wrapper for ciphers, handles operations across
+ * multiple page boundaries by using temporary blocks.  In user context,
+ * the kernel is given a chance to schedule us once per page.
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/crypto.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "internal.h"
+#include "scatterwalk.h"
+
+enum {
+	BLKCIPHER_WALK_PHYS = 1 << 0,
+	BLKCIPHER_WALK_SLOW = 1 << 1,
+	BLKCIPHER_WALK_COPY = 1 << 2,
+	BLKCIPHER_WALK_DIFF = 1 << 3,
+};
+
+static int blkcipher_walk_next(struct blkcipher_desc *desc,
+			       struct blkcipher_walk *walk);
+static int blkcipher_walk_first(struct blkcipher_desc *desc,
+				struct blkcipher_walk *walk);
+
+static inline void blkcipher_map_src(struct blkcipher_walk *walk)
+{
+	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
+}
+
+static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
+{
+	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
+}
+
+static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
+{
+	scatterwalk_unmap(walk->src.virt.addr, 0);
+}
+
+static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
+{
+	scatterwalk_unmap(walk->dst.virt.addr, 1);
+}
+
+static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
+{
+	if (offset_in_page(start + len) < len)
+		return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
+	return start;
+}
+
+static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
+					       struct blkcipher_walk *walk,
+					       unsigned int bsize)
+{
+	u8 *addr;
+	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+
+	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
+	addr = blkcipher_get_spot(addr, bsize);
+	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
+	return bsize;
+}
+
+static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
+					       unsigned int n)
+{
+	n = walk->nbytes - n;
+
+	if (walk->flags & BLKCIPHER_WALK_COPY) {
+		blkcipher_map_dst(walk);
+		memcpy(walk->dst.virt.addr, walk->page, n);
+		blkcipher_unmap_dst(walk);
+	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
+		blkcipher_unmap_src(walk);
+		if (walk->flags & BLKCIPHER_WALK_DIFF)
+			blkcipher_unmap_dst(walk);
+	}
+
+	scatterwalk_advance(&walk->in, n);
+	scatterwalk_advance(&walk->out, n);
+
+	return n;
+}
+
+int blkcipher_walk_done(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk, int err)
+{
+	struct crypto_blkcipher *tfm = desc->tfm;
+	unsigned int nbytes = 0;
+
+	if (likely(err >= 0)) {
+		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
+		unsigned int n;
+
+		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
+			n = blkcipher_done_fast(walk, err);
+		else
+			n = blkcipher_done_slow(tfm, walk, bsize);
+
+		nbytes = walk->total - n;
+		err = 0;
+	}
+
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
+
+	walk->total = nbytes;
+	walk->nbytes = nbytes;
+
+	if (nbytes) {
+		crypto_yield(desc->flags);
+		return blkcipher_walk_next(desc, walk);
+	}
+
+	if (walk->iv != desc->info)
+		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
+	if (walk->buffer != walk->page)
+		kfree(walk->buffer);
+	if (walk->page)
+		free_page((unsigned long)walk->page);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(blkcipher_walk_done);
+
+static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
+				      struct blkcipher_walk *walk,
+				      unsigned int bsize,
+				      unsigned int alignmask)
+{
+	unsigned int n;
+
+	if (walk->buffer)
+		goto ok;
+
+	walk->buffer = walk->page;
+	if (walk->buffer)
+		goto ok;
+
+	n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+	walk->buffer = kmalloc(n, GFP_ATOMIC);
+	if (!walk->buffer)
+		return blkcipher_walk_done(desc, walk, -ENOMEM);
+
+ok:
+	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
+					  alignmask + 1);
+	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
+	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
+						 bsize);
+
+	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+
+	walk->nbytes = bsize;
+	walk->flags |= BLKCIPHER_WALK_SLOW;
+
+	return 0;
+}
+
+static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
+{
+	u8 *tmp = walk->page;
+
+	blkcipher_map_src(walk);
+	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
+	blkcipher_unmap_src(walk);
+
+	walk->src.virt.addr = tmp;
+	walk->dst.virt.addr = tmp;
+
+	return 0;
+}
+
+static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
+				      struct blkcipher_walk *walk)
+{
+	unsigned long diff;
+
+	walk->src.phys.page = scatterwalk_page(&walk->in);
+	walk->src.phys.offset = offset_in_page(walk->in.offset);
+	walk->dst.phys.page = scatterwalk_page(&walk->out);
+	walk->dst.phys.offset = offset_in_page(walk->out.offset);
+
+	if (walk->flags & BLKCIPHER_WALK_PHYS)
+		return 0;
+
+	diff = walk->src.phys.offset - walk->dst.phys.offset;
+	diff |= walk->src.virt.page - walk->dst.virt.page;
+
+	blkcipher_map_src(walk);
+	walk->dst.virt.addr = walk->src.virt.addr;
+
+	if (diff) {
+		walk->flags |= BLKCIPHER_WALK_DIFF;
+		blkcipher_map_dst(walk);
+	}
+
+	return 0;
+}
+
+static int blkcipher_walk_next(struct blkcipher_desc *desc,
+			       struct blkcipher_walk *walk)
+{
+	struct crypto_blkcipher *tfm = desc->tfm;
+	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
+	unsigned int n;
+	int err;
+
+	n = walk->total;
+	if (unlikely(n < bsize)) {
+		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+		return blkcipher_walk_done(desc, walk, -EINVAL);
+	}
+
+	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+			 BLKCIPHER_WALK_DIFF);
+	if (!scatterwalk_aligned(&walk->in, alignmask) ||
+	    !scatterwalk_aligned(&walk->out, alignmask)) {
+		walk->flags |= BLKCIPHER_WALK_COPY;
+		if (!walk->page) {
+			walk->page = (void *)__get_free_page(GFP_ATOMIC);
+			if (!walk->page)
+				n = 0;
+		}
+	}
+
+	n = scatterwalk_clamp(&walk->in, n);
+	n = scatterwalk_clamp(&walk->out, n);
+
+	if (unlikely(n < bsize)) {
+		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
+		goto set_phys_lowmem;
+	}
+
+	walk->nbytes = n;
+	if (walk->flags & BLKCIPHER_WALK_COPY) {
+		err = blkcipher_next_copy(walk);
+		goto set_phys_lowmem;
+	}
+
+	return blkcipher_next_fast(desc, walk);
+
+set_phys_lowmem:
+	if (walk->flags & BLKCIPHER_WALK_PHYS) {
+		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
+		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
+		walk->src.phys.offset &= PAGE_SIZE - 1;
+		walk->dst.phys.offset &= PAGE_SIZE - 1;
+	}
+	return err;
+}
+
+static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
+				    struct crypto_blkcipher *tfm,
+				    unsigned int alignmask)
+{
+	unsigned bs = crypto_blkcipher_blocksize(tfm);
+	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
+	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
+	u8 *iv;
+
+	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+	walk->buffer = kmalloc(size, GFP_ATOMIC);
+	if (!walk->buffer)
+		return -ENOMEM;
+
+	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
+	iv = blkcipher_get_spot(iv, bs) + bs;
+	iv = blkcipher_get_spot(iv, bs) + bs;
+	iv = blkcipher_get_spot(iv, ivsize);
+
+	walk->iv = memcpy(iv, walk->iv, ivsize);
+	return 0;
+}
+
+int blkcipher_walk_virt(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk)
+{
+	walk->flags &= ~BLKCIPHER_WALK_PHYS;
+	return blkcipher_walk_first(desc, walk);
+}
+EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
+
+int blkcipher_walk_phys(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk)
+{
+	walk->flags |= BLKCIPHER_WALK_PHYS;
+	return blkcipher_walk_first(desc, walk);
+}
+EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
+
+static int blkcipher_walk_first(struct blkcipher_desc *desc,
+				struct blkcipher_walk *walk)
+{
+	struct crypto_blkcipher *tfm = desc->tfm;
+	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+
+	walk->nbytes = walk->total;
+	if (unlikely(!walk->total))
+		return 0;
+
+	walk->buffer = NULL;
+	walk->iv = desc->info;
+	if (unlikely(((unsigned long)walk->iv & alignmask))) {
+		int err = blkcipher_copy_iv(walk, tfm, alignmask);
+		if (err)
+			return err;
+	}
+
+	scatterwalk_start(&walk->in, walk->in.sg);
+	scatterwalk_start(&walk->out, walk->out.sg);
+	walk->page = NULL;
+
+	return blkcipher_walk_next(desc, walk);
+}
+
+static int setkey(struct crypto_tfm *tfm, const u8 *key,
+		  unsigned int keylen)
+{
+	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
+
+	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	return cipher->setkey(tfm, key, keylen);
+}
+
+static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
+{
+	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
+	unsigned int len = alg->cra_ctxsize;
+
+	if (cipher->ivsize) {
+		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
+		len += cipher->ivsize;
+	}
+
+	return len;
+}
+
+static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
+{
+	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
+	unsigned long addr;
+
+	if (alg->ivsize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	crt->setkey = setkey;
+	crt->encrypt = alg->encrypt;
+	crt->decrypt = alg->decrypt;
+
+	addr = (unsigned long)crypto_tfm_ctx(tfm);
+	addr = ALIGN(addr, align);
+	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
+	crt->iv = (void *)addr;
+
+	return 0;
+}
+
+static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
+	__attribute_used__;
+static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
+{
+	seq_printf(m, "type         : blkcipher\n");
+	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
+	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
+	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
+	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
+}
+
+const struct crypto_type crypto_blkcipher_type = {
+	.ctxsize = crypto_blkcipher_ctxsize,
+	.init = crypto_init_blkcipher_ops,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_blkcipher_show,
+#endif
+};
+EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic block chaining cipher type");
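
A driver implementing this type fills in the new cra_blkcipher union
member and points cra_type at crypto_blkcipher_type.  A rough
registration sketch follows; the example_* callbacks, struct
example_ctx, and the key/block sizes are hypothetical placeholders, not
part of this patch:

static struct crypto_alg example_alg = {
	.cra_name	= "example",
	.cra_flags	= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize	= 16,			/* placeholder */
	.cra_ctxsize	= sizeof(struct example_ctx),
	.cra_type	= &crypto_blkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_list	= LIST_HEAD_INIT(example_alg.cra_list),
	.cra_u		= {
		.blkcipher = {
			.min_keysize	= 16,	/* placeholder */
			.max_keysize	= 32,	/* placeholder */
			.ivsize		= 16,	/* placeholder */
			.setkey		= example_setkey,
			.encrypt	= example_encrypt,
			.decrypt	= example_decrypt,
		},
	},
};

/* Registered as usual with crypto_register_alg(&example_alg). */

crypto_init_blkcipher_ops() then wires crt_blkcipher.encrypt/decrypt
straight to the algorithm's callbacks, with setkey wrapped only for the
keysize check.
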
include/crypto/algapi.h
@@ -55,6 +55,34 @@
 	unsigned int offset;
 };
 
+struct blkcipher_walk {
+	union {
+		struct {
+			struct page *page;
+			unsigned long offset;
+		} phys;
+
+		struct {
+			u8 *page;
+			u8 *addr;
+		} virt;
+	} src, dst;
+
+	struct scatter_walk in;
+	unsigned int nbytes;
+
+	struct scatter_walk out;
+	unsigned int total;
+
+	void *page;
+	u8 *buffer;
+	u8 *iv;
+
+	int flags;
+};
+
+extern const struct crypto_type crypto_blkcipher_type;
+
 int crypto_register_template(struct crypto_template *tmpl);
 void crypto_unregister_template(struct crypto_template *tmpl);
 struct crypto_template *crypto_lookup_template(const char *name);
@@ -69,14 +97,51 @@
 struct crypto_instance *crypto_alloc_instance(const char *name,
 					      struct crypto_alg *alg);
 
+int blkcipher_walk_done(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk, int err);
+int blkcipher_walk_virt(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk);
+int blkcipher_walk_phys(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk);
+
+static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
+{
+	unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
+	unsigned long align = crypto_tfm_alg_alignmask(tfm);
+
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+	return (void *)ALIGN(addr, align);
+}
+
 static inline void *crypto_instance_ctx(struct crypto_instance *inst)
 {
 	return inst->__ctx;
 }
 
+static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
 static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
 {
 	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+}
+
+static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
+				       struct scatterlist *dst,
+				       struct scatterlist *src,
+				       unsigned int nbytes)
+{
+	walk->in.sg = src;
+	walk->out.sg = dst;
+	walk->total = nbytes;
 }
 
 #endif /* _CRYPTO_ALGAPI_H */
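
The crypto_tfm_ctx_aligned() helper added above returns the tfm context
rounded up to the algorithm's alignment mask, so alignment-sensitive
code can use the context directly.  A minimal sketch of the intended
use, with a hypothetical context type and key schedule:

struct example_ctx {
	u8 round_keys[64];		/* hypothetical per-tfm state */
};

static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	/* Aligned to cra_alignmask + 1 when that exceeds the default
	   context alignment. */
	struct example_ctx *ctx = crypto_tfm_ctx_aligned(tfm);

	memcpy(ctx->round_keys, key, keylen);	/* placeholder key schedule */
	return 0;
}
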
include/linux/crypto.h
@@ -32,6 +32,7 @@
 #define CRYPTO_ALG_TYPE_MASK		0x0000000f
 #define CRYPTO_ALG_TYPE_CIPHER		0x00000001
 #define CRYPTO_ALG_TYPE_DIGEST		0x00000002
+#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000003
 #define CRYPTO_ALG_TYPE_COMPRESS	0x00000004
 
 #define CRYPTO_ALG_LARVAL		0x00000010
@@ -89,9 +90,16 @@
 #endif
 
 struct scatterlist;
+struct crypto_blkcipher;
 struct crypto_tfm;
 struct crypto_type;
 
+struct blkcipher_desc {
+	struct crypto_blkcipher *tfm;
+	void *info;
+	u32 flags;
+};
+
 struct cipher_desc {
 	struct crypto_tfm *tfm;
 	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
@@ -104,6 +112,21 @@
  * Algorithms: modular crypto algorithm implementations, managed
  * via crypto_register_alg() and crypto_unregister_alg().
  */
+struct blkcipher_alg {
+	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
+		      unsigned int keylen);
+	int (*encrypt)(struct blkcipher_desc *desc,
+		       struct scatterlist *dst, struct scatterlist *src,
+		       unsigned int nbytes);
+	int (*decrypt)(struct blkcipher_desc *desc,
+		       struct scatterlist *dst, struct scatterlist *src,
+		       unsigned int nbytes);
+
+	unsigned int min_keysize;
+	unsigned int max_keysize;
+	unsigned int ivsize;
+};
+
 struct cipher_alg {
 	unsigned int cia_min_keysize;
 	unsigned int cia_max_keysize;
@@ -143,6 +166,7 @@
 			      unsigned int slen, u8 *dst, unsigned int *dlen);
 };
 
+#define cra_blkcipher	cra_u.blkcipher
 #define cra_cipher	cra_u.cipher
 #define cra_digest	cra_u.digest
 #define cra_compress	cra_u.compress
@@ -165,6 +189,7 @@
 	const struct crypto_type *cra_type;
 
 	union {
+		struct blkcipher_alg blkcipher;
 		struct cipher_alg cipher;
 		struct digest_alg digest;
 		struct compress_alg compress;
@@ -201,6 +226,16 @@
  * crypto_free_*(), as well as the various helpers below.
  */
 
+struct blkcipher_tfm {
+	void *iv;
+	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
+		      unsigned int keylen);
+	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes);
+	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes);
+};
+
 struct cipher_tfm {
 	void *cit_iv;
 	unsigned int cit_ivsize;
@@ -251,6 +286,7 @@
 			      u8 *dst, unsigned int *dlen);
 };
 
+#define crt_blkcipher	crt_u.blkcipher
 #define crt_cipher	crt_u.cipher
 #define crt_digest	crt_u.digest
 #define crt_compress	crt_u.compress
@@ -260,6 +296,7 @@
 	u32 crt_flags;
 
 	union {
+		struct blkcipher_tfm blkcipher;
 		struct cipher_tfm cipher;
 		struct digest_tfm digest;
 		struct compress_tfm compress;
@@ -272,6 +309,10 @@
 
 #define crypto_cipher crypto_tfm
 
+struct crypto_blkcipher {
+	struct crypto_tfm base;
+};
+
 enum {
 	CRYPTOA_UNSPEC,
 	CRYPTOA_ALG,
@@ -380,6 +421,144 @@
 /*
  * API wrappers.
  */
+static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
+	struct crypto_tfm *tfm)
+{
+	return (struct crypto_blkcipher *)tfm;
+}
+
+static inline struct crypto_blkcipher *crypto_blkcipher_cast(
+	struct crypto_tfm *tfm)
+{
+	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
+	return __crypto_blkcipher_cast(tfm);
+}
+
+static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
+	const char *alg_name, u32 type, u32 mask)
+{
+	type &= ~CRYPTO_ALG_TYPE_MASK;
+	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+	mask |= CRYPTO_ALG_TYPE_MASK;
+
+	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_blkcipher_tfm(
+	struct crypto_blkcipher *tfm)
+{
+	return &tfm->base;
+}
+
+static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
+{
+	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
+}
+
+static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
+}
+
+static inline struct blkcipher_tfm *crypto_blkcipher_crt(
+	struct crypto_blkcipher *tfm)
+{
+	return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
+}
+
+static inline struct blkcipher_alg *crypto_blkcipher_alg(
+	struct crypto_blkcipher *tfm)
+{
+	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
+}
+
+static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
+{
+	return crypto_blkcipher_alg(tfm)->ivsize;
+}
+
+static inline unsigned int crypto_blkcipher_blocksize(
+	struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_blkcipher_alignmask(
+	struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
+}
+
+static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
+}
+
+static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
+					      u32 flags)
+{
+	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
+						u32 flags)
+{
+	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
+}
+
+static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
+					  const u8 *key, unsigned int keylen)
+{
+	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
+						 key, keylen);
+}
+
+static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
+					   struct scatterlist *dst,
+					   struct scatterlist *src,
+					   unsigned int nbytes)
+{
+	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
+	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
+}
+
+static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
+					      struct scatterlist *dst,
+					      struct scatterlist *src,
+					      unsigned int nbytes)
+{
+	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
+}
+
+static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
+					   struct scatterlist *dst,
+					   struct scatterlist *src,
+					   unsigned int nbytes)
+{
+	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
+	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
+}
+
+static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
+					      struct scatterlist *dst,
+					      struct scatterlist *src,
+					      unsigned int nbytes)
+{
+	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
+}
+
+static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
+					   const u8 *src, unsigned int len)
+{
+	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
+}
+
+static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
+					   u8 *dst, unsigned int len)
+{
+	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
+}
+
 static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
 {
 	return (struct crypto_cipher *)tfm;
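
From the caller's side, the wrappers above combine as in the following
hedged sketch (error handling abbreviated; the usual <linux/err.h>
helpers are assumed, and "cbc(aes)" assumes a blkcipher implementation
of that algorithm has been registered, which this patch itself does not
add):

static int example_encrypt_sg(struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes,
			      const u8 *key, unsigned int keylen, const u8 *iv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	/* Use the tfm-internal IV; crypto_blkcipher_encrypt() points
	   desc.info at it before calling into the algorithm. */
	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

	desc.tfm = tfm;
	desc.flags = 0;		/* or CRYPTO_TFM_REQ_MAY_SLEEP in user context */
	err = crypto_blkcipher_encrypt(&desc, dst, src, nbytes);

out:
	crypto_free_blkcipher(tfm);
	return err;
}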