Commit acc5ccb9fe1c1d3840d49e181ae30b924cfc28b5
Committed by: Greg Kroah-Hartman
1 parent: f2efa8653b
crypto: include crypto- module prefix in template
commit 4943ba16bbc2db05115707b3ff7b4874e9e3c560 upstream. This adds the module loading prefix "crypto-" to the template lookup as well. For example, attempting to load 'vfat(blowfish)' via AF_ALG now correctly includes the "crypto-" prefix at every level, correctly rejecting "vfat": net-pf-38 algif-hash crypto-vfat(blowfish) crypto-vfat(blowfish)-all crypto-vfat Reported-by: Mathias Krause <minipli@googlemail.com> Signed-off-by: Kees Cook <keescook@chromium.org> Acked-by: Mathias Krause <minipli@googlemail.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 23 changed files with 26 additions and 2 deletions Inline Diff
- arch/x86/crypto/fpu.c
- crypto/algapi.c
- crypto/authenc.c
- crypto/authencesn.c
- crypto/cbc.c
- crypto/ccm.c
- crypto/chainiv.c
- crypto/cmac.c
- crypto/cryptd.c
- crypto/ctr.c
- crypto/cts.c
- crypto/ecb.c
- crypto/eseqiv.c
- crypto/gcm.c
- crypto/hmac.c
- crypto/lrw.c
- crypto/mcryptd.c
- crypto/pcbc.c
- crypto/pcrypt.c
- crypto/seqiv.c
- crypto/vmac.c
- crypto/xcbc.c
- crypto/xts.c
arch/x86/crypto/fpu.c
1 | /* | 1 | /* |
2 | * FPU: Wrapper for blkcipher touching fpu | 2 | * FPU: Wrapper for blkcipher touching fpu |
3 | * | 3 | * |
4 | * Copyright (c) Intel Corp. | 4 | * Copyright (c) Intel Corp. |
5 | * Author: Huang Ying <ying.huang@intel.com> | 5 | * Author: Huang Ying <ying.huang@intel.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the Free | 8 | * under the terms of the GNU General Public License as published by the Free |
9 | * Software Foundation; either version 2 of the License, or (at your option) | 9 | * Software Foundation; either version 2 of the License, or (at your option) |
10 | * any later version. | 10 | * any later version. |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <crypto/algapi.h> | 14 | #include <crypto/algapi.h> |
15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/crypto.h> | ||
20 | #include <asm/i387.h> | 21 | #include <asm/i387.h> |
21 | 22 | ||
22 | struct crypto_fpu_ctx { | 23 | struct crypto_fpu_ctx { |
23 | struct crypto_blkcipher *child; | 24 | struct crypto_blkcipher *child; |
24 | }; | 25 | }; |
25 | 26 | ||
26 | static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key, | 27 | static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key, |
27 | unsigned int keylen) | 28 | unsigned int keylen) |
28 | { | 29 | { |
29 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent); | 30 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent); |
30 | struct crypto_blkcipher *child = ctx->child; | 31 | struct crypto_blkcipher *child = ctx->child; |
31 | int err; | 32 | int err; |
32 | 33 | ||
33 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 34 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
34 | crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & | 35 | crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & |
35 | CRYPTO_TFM_REQ_MASK); | 36 | CRYPTO_TFM_REQ_MASK); |
36 | err = crypto_blkcipher_setkey(child, key, keylen); | 37 | err = crypto_blkcipher_setkey(child, key, keylen); |
37 | crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & | 38 | crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & |
38 | CRYPTO_TFM_RES_MASK); | 39 | CRYPTO_TFM_RES_MASK); |
39 | return err; | 40 | return err; |
40 | } | 41 | } |
41 | 42 | ||
42 | static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in, | 43 | static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in, |
43 | struct scatterlist *dst, struct scatterlist *src, | 44 | struct scatterlist *dst, struct scatterlist *src, |
44 | unsigned int nbytes) | 45 | unsigned int nbytes) |
45 | { | 46 | { |
46 | int err; | 47 | int err; |
47 | struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); | 48 | struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); |
48 | struct crypto_blkcipher *child = ctx->child; | 49 | struct crypto_blkcipher *child = ctx->child; |
49 | struct blkcipher_desc desc = { | 50 | struct blkcipher_desc desc = { |
50 | .tfm = child, | 51 | .tfm = child, |
51 | .info = desc_in->info, | 52 | .info = desc_in->info, |
52 | .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | 53 | .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, |
53 | }; | 54 | }; |
54 | 55 | ||
55 | kernel_fpu_begin(); | 56 | kernel_fpu_begin(); |
56 | err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes); | 57 | err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes); |
57 | kernel_fpu_end(); | 58 | kernel_fpu_end(); |
58 | return err; | 59 | return err; |
59 | } | 60 | } |
60 | 61 | ||
61 | static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in, | 62 | static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in, |
62 | struct scatterlist *dst, struct scatterlist *src, | 63 | struct scatterlist *dst, struct scatterlist *src, |
63 | unsigned int nbytes) | 64 | unsigned int nbytes) |
64 | { | 65 | { |
65 | int err; | 66 | int err; |
66 | struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); | 67 | struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); |
67 | struct crypto_blkcipher *child = ctx->child; | 68 | struct crypto_blkcipher *child = ctx->child; |
68 | struct blkcipher_desc desc = { | 69 | struct blkcipher_desc desc = { |
69 | .tfm = child, | 70 | .tfm = child, |
70 | .info = desc_in->info, | 71 | .info = desc_in->info, |
71 | .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | 72 | .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, |
72 | }; | 73 | }; |
73 | 74 | ||
74 | kernel_fpu_begin(); | 75 | kernel_fpu_begin(); |
75 | err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes); | 76 | err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes); |
76 | kernel_fpu_end(); | 77 | kernel_fpu_end(); |
77 | return err; | 78 | return err; |
78 | } | 79 | } |
79 | 80 | ||
80 | static int crypto_fpu_init_tfm(struct crypto_tfm *tfm) | 81 | static int crypto_fpu_init_tfm(struct crypto_tfm *tfm) |
81 | { | 82 | { |
82 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 83 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
83 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 84 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
84 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); | 85 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); |
85 | struct crypto_blkcipher *cipher; | 86 | struct crypto_blkcipher *cipher; |
86 | 87 | ||
87 | cipher = crypto_spawn_blkcipher(spawn); | 88 | cipher = crypto_spawn_blkcipher(spawn); |
88 | if (IS_ERR(cipher)) | 89 | if (IS_ERR(cipher)) |
89 | return PTR_ERR(cipher); | 90 | return PTR_ERR(cipher); |
90 | 91 | ||
91 | ctx->child = cipher; | 92 | ctx->child = cipher; |
92 | return 0; | 93 | return 0; |
93 | } | 94 | } |
94 | 95 | ||
95 | static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm) | 96 | static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm) |
96 | { | 97 | { |
97 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); | 98 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); |
98 | crypto_free_blkcipher(ctx->child); | 99 | crypto_free_blkcipher(ctx->child); |
99 | } | 100 | } |
100 | 101 | ||
101 | static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb) | 102 | static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb) |
102 | { | 103 | { |
103 | struct crypto_instance *inst; | 104 | struct crypto_instance *inst; |
104 | struct crypto_alg *alg; | 105 | struct crypto_alg *alg; |
105 | int err; | 106 | int err; |
106 | 107 | ||
107 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 108 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); |
108 | if (err) | 109 | if (err) |
109 | return ERR_PTR(err); | 110 | return ERR_PTR(err); |
110 | 111 | ||
111 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, | 112 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, |
112 | CRYPTO_ALG_TYPE_MASK); | 113 | CRYPTO_ALG_TYPE_MASK); |
113 | if (IS_ERR(alg)) | 114 | if (IS_ERR(alg)) |
114 | return ERR_CAST(alg); | 115 | return ERR_CAST(alg); |
115 | 116 | ||
116 | inst = crypto_alloc_instance("fpu", alg); | 117 | inst = crypto_alloc_instance("fpu", alg); |
117 | if (IS_ERR(inst)) | 118 | if (IS_ERR(inst)) |
118 | goto out_put_alg; | 119 | goto out_put_alg; |
119 | 120 | ||
120 | inst->alg.cra_flags = alg->cra_flags; | 121 | inst->alg.cra_flags = alg->cra_flags; |
121 | inst->alg.cra_priority = alg->cra_priority; | 122 | inst->alg.cra_priority = alg->cra_priority; |
122 | inst->alg.cra_blocksize = alg->cra_blocksize; | 123 | inst->alg.cra_blocksize = alg->cra_blocksize; |
123 | inst->alg.cra_alignmask = alg->cra_alignmask; | 124 | inst->alg.cra_alignmask = alg->cra_alignmask; |
124 | inst->alg.cra_type = alg->cra_type; | 125 | inst->alg.cra_type = alg->cra_type; |
125 | inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize; | 126 | inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize; |
126 | inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; | 127 | inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; |
127 | inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; | 128 | inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; |
128 | inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx); | 129 | inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx); |
129 | inst->alg.cra_init = crypto_fpu_init_tfm; | 130 | inst->alg.cra_init = crypto_fpu_init_tfm; |
130 | inst->alg.cra_exit = crypto_fpu_exit_tfm; | 131 | inst->alg.cra_exit = crypto_fpu_exit_tfm; |
131 | inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey; | 132 | inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey; |
132 | inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt; | 133 | inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt; |
133 | inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt; | 134 | inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt; |
134 | 135 | ||
135 | out_put_alg: | 136 | out_put_alg: |
136 | crypto_mod_put(alg); | 137 | crypto_mod_put(alg); |
137 | return inst; | 138 | return inst; |
138 | } | 139 | } |
139 | 140 | ||
/* Tear down an instance: drop the spawn, then free the instance. */
static void crypto_fpu_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
145 | 146 | ||
146 | static struct crypto_template crypto_fpu_tmpl = { | 147 | static struct crypto_template crypto_fpu_tmpl = { |
147 | .name = "fpu", | 148 | .name = "fpu", |
148 | .alloc = crypto_fpu_alloc, | 149 | .alloc = crypto_fpu_alloc, |
149 | .free = crypto_fpu_free, | 150 | .free = crypto_fpu_free, |
150 | .module = THIS_MODULE, | 151 | .module = THIS_MODULE, |
151 | }; | 152 | }; |
152 | 153 | ||
153 | int __init crypto_fpu_init(void) | 154 | int __init crypto_fpu_init(void) |
154 | { | 155 | { |
155 | return crypto_register_template(&crypto_fpu_tmpl); | 156 | return crypto_register_template(&crypto_fpu_tmpl); |
156 | } | 157 | } |
157 | 158 | ||
158 | void __exit crypto_fpu_exit(void) | 159 | void __exit crypto_fpu_exit(void) |
159 | { | 160 | { |
160 | crypto_unregister_template(&crypto_fpu_tmpl); | 161 | crypto_unregister_template(&crypto_fpu_tmpl); |
161 | } | 162 | } |
163 | |||
164 | MODULE_ALIAS_CRYPTO("fpu"); | ||
162 | 165 |
crypto/algapi.c
1 | /* | 1 | /* |
2 | * Cryptographic API for algorithms (i.e., low-level API). | 2 | * Cryptographic API for algorithms (i.e., low-level API). |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | 22 | ||
23 | #include "internal.h" | 23 | #include "internal.h" |
24 | 24 | ||
25 | static LIST_HEAD(crypto_template_list); | 25 | static LIST_HEAD(crypto_template_list); |
26 | 26 | ||
27 | static inline int crypto_set_driver_name(struct crypto_alg *alg) | 27 | static inline int crypto_set_driver_name(struct crypto_alg *alg) |
28 | { | 28 | { |
29 | static const char suffix[] = "-generic"; | 29 | static const char suffix[] = "-generic"; |
30 | char *driver_name = alg->cra_driver_name; | 30 | char *driver_name = alg->cra_driver_name; |
31 | int len; | 31 | int len; |
32 | 32 | ||
33 | if (*driver_name) | 33 | if (*driver_name) |
34 | return 0; | 34 | return 0; |
35 | 35 | ||
36 | len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 36 | len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
37 | if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) | 37 | if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) |
38 | return -ENAMETOOLONG; | 38 | return -ENAMETOOLONG; |
39 | 39 | ||
40 | memcpy(driver_name + len, suffix, sizeof(suffix)); | 40 | memcpy(driver_name + len, suffix, sizeof(suffix)); |
41 | return 0; | 41 | return 0; |
42 | } | 42 | } |
43 | 43 | ||
/*
 * In FIPS mode, refuse (by panicking) to accept algorithms from a
 * module whose signature did not verify.  No-op otherwise.
 */
static inline void crypto_check_module_sig(struct module *mod)
{
#ifdef CONFIG_CRYPTO_FIPS
	if (fips_enabled && mod && !mod->sig_ok)
		panic("Module %s signature verification failed in FIPS mode\n",
		      mod->name);
#endif
}
53 | 53 | ||
54 | static int crypto_check_alg(struct crypto_alg *alg) | 54 | static int crypto_check_alg(struct crypto_alg *alg) |
55 | { | 55 | { |
56 | crypto_check_module_sig(alg->cra_module); | 56 | crypto_check_module_sig(alg->cra_module); |
57 | 57 | ||
58 | if (alg->cra_alignmask & (alg->cra_alignmask + 1)) | 58 | if (alg->cra_alignmask & (alg->cra_alignmask + 1)) |
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | 60 | ||
61 | if (alg->cra_blocksize > PAGE_SIZE / 8) | 61 | if (alg->cra_blocksize > PAGE_SIZE / 8) |
62 | return -EINVAL; | 62 | return -EINVAL; |
63 | 63 | ||
64 | if (alg->cra_priority < 0) | 64 | if (alg->cra_priority < 0) |
65 | return -EINVAL; | 65 | return -EINVAL; |
66 | 66 | ||
67 | return crypto_set_driver_name(alg); | 67 | return crypto_set_driver_name(alg); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void crypto_destroy_instance(struct crypto_alg *alg) | 70 | static void crypto_destroy_instance(struct crypto_alg *alg) |
71 | { | 71 | { |
72 | struct crypto_instance *inst = (void *)alg; | 72 | struct crypto_instance *inst = (void *)alg; |
73 | struct crypto_template *tmpl = inst->tmpl; | 73 | struct crypto_template *tmpl = inst->tmpl; |
74 | 74 | ||
75 | tmpl->free(inst); | 75 | tmpl->free(inst); |
76 | crypto_tmpl_put(tmpl); | 76 | crypto_tmpl_put(tmpl); |
77 | } | 77 | } |
78 | 78 | ||
79 | static struct list_head *crypto_more_spawns(struct crypto_alg *alg, | 79 | static struct list_head *crypto_more_spawns(struct crypto_alg *alg, |
80 | struct list_head *stack, | 80 | struct list_head *stack, |
81 | struct list_head *top, | 81 | struct list_head *top, |
82 | struct list_head *secondary_spawns) | 82 | struct list_head *secondary_spawns) |
83 | { | 83 | { |
84 | struct crypto_spawn *spawn, *n; | 84 | struct crypto_spawn *spawn, *n; |
85 | 85 | ||
86 | if (list_empty(stack)) | 86 | if (list_empty(stack)) |
87 | return NULL; | 87 | return NULL; |
88 | 88 | ||
89 | spawn = list_first_entry(stack, struct crypto_spawn, list); | 89 | spawn = list_first_entry(stack, struct crypto_spawn, list); |
90 | n = list_entry(spawn->list.next, struct crypto_spawn, list); | 90 | n = list_entry(spawn->list.next, struct crypto_spawn, list); |
91 | 91 | ||
92 | if (spawn->alg && &n->list != stack && !n->alg) | 92 | if (spawn->alg && &n->list != stack && !n->alg) |
93 | n->alg = (n->list.next == stack) ? alg : | 93 | n->alg = (n->list.next == stack) ? alg : |
94 | &list_entry(n->list.next, struct crypto_spawn, | 94 | &list_entry(n->list.next, struct crypto_spawn, |
95 | list)->inst->alg; | 95 | list)->inst->alg; |
96 | 96 | ||
97 | list_move(&spawn->list, secondary_spawns); | 97 | list_move(&spawn->list, secondary_spawns); |
98 | 98 | ||
99 | return &n->list == stack ? top : &n->inst->alg.cra_users; | 99 | return &n->list == stack ? top : &n->inst->alg.cra_users; |
100 | } | 100 | } |
101 | 101 | ||
102 | static void crypto_remove_spawn(struct crypto_spawn *spawn, | 102 | static void crypto_remove_spawn(struct crypto_spawn *spawn, |
103 | struct list_head *list) | 103 | struct list_head *list) |
104 | { | 104 | { |
105 | struct crypto_instance *inst = spawn->inst; | 105 | struct crypto_instance *inst = spawn->inst; |
106 | struct crypto_template *tmpl = inst->tmpl; | 106 | struct crypto_template *tmpl = inst->tmpl; |
107 | 107 | ||
108 | if (crypto_is_dead(&inst->alg)) | 108 | if (crypto_is_dead(&inst->alg)) |
109 | return; | 109 | return; |
110 | 110 | ||
111 | inst->alg.cra_flags |= CRYPTO_ALG_DEAD; | 111 | inst->alg.cra_flags |= CRYPTO_ALG_DEAD; |
112 | if (hlist_unhashed(&inst->list)) | 112 | if (hlist_unhashed(&inst->list)) |
113 | return; | 113 | return; |
114 | 114 | ||
115 | if (!tmpl || !crypto_tmpl_get(tmpl)) | 115 | if (!tmpl || !crypto_tmpl_get(tmpl)) |
116 | return; | 116 | return; |
117 | 117 | ||
118 | crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); | 118 | crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); |
119 | list_move(&inst->alg.cra_list, list); | 119 | list_move(&inst->alg.cra_list, list); |
120 | hlist_del(&inst->list); | 120 | hlist_del(&inst->list); |
121 | inst->alg.cra_destroy = crypto_destroy_instance; | 121 | inst->alg.cra_destroy = crypto_destroy_instance; |
122 | 122 | ||
123 | BUG_ON(!list_empty(&inst->alg.cra_users)); | 123 | BUG_ON(!list_empty(&inst->alg.cra_users)); |
124 | } | 124 | } |
125 | 125 | ||
126 | void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, | 126 | void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, |
127 | struct crypto_alg *nalg) | 127 | struct crypto_alg *nalg) |
128 | { | 128 | { |
129 | u32 new_type = (nalg ?: alg)->cra_flags; | 129 | u32 new_type = (nalg ?: alg)->cra_flags; |
130 | struct crypto_spawn *spawn, *n; | 130 | struct crypto_spawn *spawn, *n; |
131 | LIST_HEAD(secondary_spawns); | 131 | LIST_HEAD(secondary_spawns); |
132 | struct list_head *spawns; | 132 | struct list_head *spawns; |
133 | LIST_HEAD(stack); | 133 | LIST_HEAD(stack); |
134 | LIST_HEAD(top); | 134 | LIST_HEAD(top); |
135 | 135 | ||
136 | spawns = &alg->cra_users; | 136 | spawns = &alg->cra_users; |
137 | list_for_each_entry_safe(spawn, n, spawns, list) { | 137 | list_for_each_entry_safe(spawn, n, spawns, list) { |
138 | if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) | 138 | if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) |
139 | continue; | 139 | continue; |
140 | 140 | ||
141 | list_move(&spawn->list, &top); | 141 | list_move(&spawn->list, &top); |
142 | } | 142 | } |
143 | 143 | ||
144 | spawns = ⊤ | 144 | spawns = ⊤ |
145 | do { | 145 | do { |
146 | while (!list_empty(spawns)) { | 146 | while (!list_empty(spawns)) { |
147 | struct crypto_instance *inst; | 147 | struct crypto_instance *inst; |
148 | 148 | ||
149 | spawn = list_first_entry(spawns, struct crypto_spawn, | 149 | spawn = list_first_entry(spawns, struct crypto_spawn, |
150 | list); | 150 | list); |
151 | inst = spawn->inst; | 151 | inst = spawn->inst; |
152 | 152 | ||
153 | BUG_ON(&inst->alg == alg); | 153 | BUG_ON(&inst->alg == alg); |
154 | 154 | ||
155 | list_move(&spawn->list, &stack); | 155 | list_move(&spawn->list, &stack); |
156 | 156 | ||
157 | if (&inst->alg == nalg) | 157 | if (&inst->alg == nalg) |
158 | break; | 158 | break; |
159 | 159 | ||
160 | spawn->alg = NULL; | 160 | spawn->alg = NULL; |
161 | spawns = &inst->alg.cra_users; | 161 | spawns = &inst->alg.cra_users; |
162 | } | 162 | } |
163 | } while ((spawns = crypto_more_spawns(alg, &stack, &top, | 163 | } while ((spawns = crypto_more_spawns(alg, &stack, &top, |
164 | &secondary_spawns))); | 164 | &secondary_spawns))); |
165 | 165 | ||
166 | list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { | 166 | list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { |
167 | if (spawn->alg) | 167 | if (spawn->alg) |
168 | list_move(&spawn->list, &spawn->alg->cra_users); | 168 | list_move(&spawn->list, &spawn->alg->cra_users); |
169 | else | 169 | else |
170 | crypto_remove_spawn(spawn, list); | 170 | crypto_remove_spawn(spawn, list); |
171 | } | 171 | } |
172 | } | 172 | } |
173 | EXPORT_SYMBOL_GPL(crypto_remove_spawns); | 173 | EXPORT_SYMBOL_GPL(crypto_remove_spawns); |
174 | 174 | ||
175 | static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) | 175 | static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) |
176 | { | 176 | { |
177 | struct crypto_alg *q; | 177 | struct crypto_alg *q; |
178 | struct crypto_larval *larval; | 178 | struct crypto_larval *larval; |
179 | int ret = -EAGAIN; | 179 | int ret = -EAGAIN; |
180 | 180 | ||
181 | if (crypto_is_dead(alg)) | 181 | if (crypto_is_dead(alg)) |
182 | goto err; | 182 | goto err; |
183 | 183 | ||
184 | INIT_LIST_HEAD(&alg->cra_users); | 184 | INIT_LIST_HEAD(&alg->cra_users); |
185 | 185 | ||
186 | /* No cheating! */ | 186 | /* No cheating! */ |
187 | alg->cra_flags &= ~CRYPTO_ALG_TESTED; | 187 | alg->cra_flags &= ~CRYPTO_ALG_TESTED; |
188 | 188 | ||
189 | ret = -EEXIST; | 189 | ret = -EEXIST; |
190 | 190 | ||
191 | atomic_set(&alg->cra_refcnt, 1); | 191 | atomic_set(&alg->cra_refcnt, 1); |
192 | list_for_each_entry(q, &crypto_alg_list, cra_list) { | 192 | list_for_each_entry(q, &crypto_alg_list, cra_list) { |
193 | if (q == alg) | 193 | if (q == alg) |
194 | goto err; | 194 | goto err; |
195 | 195 | ||
196 | if (crypto_is_moribund(q)) | 196 | if (crypto_is_moribund(q)) |
197 | continue; | 197 | continue; |
198 | 198 | ||
199 | if (crypto_is_larval(q)) { | 199 | if (crypto_is_larval(q)) { |
200 | if (!strcmp(alg->cra_driver_name, q->cra_driver_name)) | 200 | if (!strcmp(alg->cra_driver_name, q->cra_driver_name)) |
201 | goto err; | 201 | goto err; |
202 | continue; | 202 | continue; |
203 | } | 203 | } |
204 | 204 | ||
205 | if (!strcmp(q->cra_driver_name, alg->cra_name) || | 205 | if (!strcmp(q->cra_driver_name, alg->cra_name) || |
206 | !strcmp(q->cra_name, alg->cra_driver_name)) | 206 | !strcmp(q->cra_name, alg->cra_driver_name)) |
207 | goto err; | 207 | goto err; |
208 | } | 208 | } |
209 | 209 | ||
210 | larval = crypto_larval_alloc(alg->cra_name, | 210 | larval = crypto_larval_alloc(alg->cra_name, |
211 | alg->cra_flags | CRYPTO_ALG_TESTED, 0); | 211 | alg->cra_flags | CRYPTO_ALG_TESTED, 0); |
212 | if (IS_ERR(larval)) | 212 | if (IS_ERR(larval)) |
213 | goto out; | 213 | goto out; |
214 | 214 | ||
215 | ret = -ENOENT; | 215 | ret = -ENOENT; |
216 | larval->adult = crypto_mod_get(alg); | 216 | larval->adult = crypto_mod_get(alg); |
217 | if (!larval->adult) | 217 | if (!larval->adult) |
218 | goto free_larval; | 218 | goto free_larval; |
219 | 219 | ||
220 | atomic_set(&larval->alg.cra_refcnt, 1); | 220 | atomic_set(&larval->alg.cra_refcnt, 1); |
221 | memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, | 221 | memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, |
222 | CRYPTO_MAX_ALG_NAME); | 222 | CRYPTO_MAX_ALG_NAME); |
223 | larval->alg.cra_priority = alg->cra_priority; | 223 | larval->alg.cra_priority = alg->cra_priority; |
224 | 224 | ||
225 | list_add(&alg->cra_list, &crypto_alg_list); | 225 | list_add(&alg->cra_list, &crypto_alg_list); |
226 | list_add(&larval->alg.cra_list, &crypto_alg_list); | 226 | list_add(&larval->alg.cra_list, &crypto_alg_list); |
227 | 227 | ||
228 | out: | 228 | out: |
229 | return larval; | 229 | return larval; |
230 | 230 | ||
231 | free_larval: | 231 | free_larval: |
232 | kfree(larval); | 232 | kfree(larval); |
233 | err: | 233 | err: |
234 | larval = ERR_PTR(ret); | 234 | larval = ERR_PTR(ret); |
235 | goto out; | 235 | goto out; |
236 | } | 236 | } |
237 | 237 | ||
238 | void crypto_alg_tested(const char *name, int err) | 238 | void crypto_alg_tested(const char *name, int err) |
239 | { | 239 | { |
240 | struct crypto_larval *test; | 240 | struct crypto_larval *test; |
241 | struct crypto_alg *alg; | 241 | struct crypto_alg *alg; |
242 | struct crypto_alg *q; | 242 | struct crypto_alg *q; |
243 | LIST_HEAD(list); | 243 | LIST_HEAD(list); |
244 | 244 | ||
245 | down_write(&crypto_alg_sem); | 245 | down_write(&crypto_alg_sem); |
246 | list_for_each_entry(q, &crypto_alg_list, cra_list) { | 246 | list_for_each_entry(q, &crypto_alg_list, cra_list) { |
247 | if (crypto_is_moribund(q) || !crypto_is_larval(q)) | 247 | if (crypto_is_moribund(q) || !crypto_is_larval(q)) |
248 | continue; | 248 | continue; |
249 | 249 | ||
250 | test = (struct crypto_larval *)q; | 250 | test = (struct crypto_larval *)q; |
251 | 251 | ||
252 | if (!strcmp(q->cra_driver_name, name)) | 252 | if (!strcmp(q->cra_driver_name, name)) |
253 | goto found; | 253 | goto found; |
254 | } | 254 | } |
255 | 255 | ||
256 | printk(KERN_ERR "alg: Unexpected test result for %s: %d\n", name, err); | 256 | printk(KERN_ERR "alg: Unexpected test result for %s: %d\n", name, err); |
257 | goto unlock; | 257 | goto unlock; |
258 | 258 | ||
259 | found: | 259 | found: |
260 | q->cra_flags |= CRYPTO_ALG_DEAD; | 260 | q->cra_flags |= CRYPTO_ALG_DEAD; |
261 | alg = test->adult; | 261 | alg = test->adult; |
262 | if (err || list_empty(&alg->cra_list)) | 262 | if (err || list_empty(&alg->cra_list)) |
263 | goto complete; | 263 | goto complete; |
264 | 264 | ||
265 | alg->cra_flags |= CRYPTO_ALG_TESTED; | 265 | alg->cra_flags |= CRYPTO_ALG_TESTED; |
266 | 266 | ||
267 | list_for_each_entry(q, &crypto_alg_list, cra_list) { | 267 | list_for_each_entry(q, &crypto_alg_list, cra_list) { |
268 | if (q == alg) | 268 | if (q == alg) |
269 | continue; | 269 | continue; |
270 | 270 | ||
271 | if (crypto_is_moribund(q)) | 271 | if (crypto_is_moribund(q)) |
272 | continue; | 272 | continue; |
273 | 273 | ||
274 | if (crypto_is_larval(q)) { | 274 | if (crypto_is_larval(q)) { |
275 | struct crypto_larval *larval = (void *)q; | 275 | struct crypto_larval *larval = (void *)q; |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * Check to see if either our generic name or | 278 | * Check to see if either our generic name or |
279 | * specific name can satisfy the name requested | 279 | * specific name can satisfy the name requested |
280 | * by the larval entry q. | 280 | * by the larval entry q. |
281 | */ | 281 | */ |
282 | if (strcmp(alg->cra_name, q->cra_name) && | 282 | if (strcmp(alg->cra_name, q->cra_name) && |
283 | strcmp(alg->cra_driver_name, q->cra_name)) | 283 | strcmp(alg->cra_driver_name, q->cra_name)) |
284 | continue; | 284 | continue; |
285 | 285 | ||
286 | if (larval->adult) | 286 | if (larval->adult) |
287 | continue; | 287 | continue; |
288 | if ((q->cra_flags ^ alg->cra_flags) & larval->mask) | 288 | if ((q->cra_flags ^ alg->cra_flags) & larval->mask) |
289 | continue; | 289 | continue; |
290 | if (!crypto_mod_get(alg)) | 290 | if (!crypto_mod_get(alg)) |
291 | continue; | 291 | continue; |
292 | 292 | ||
293 | larval->adult = alg; | 293 | larval->adult = alg; |
294 | continue; | 294 | continue; |
295 | } | 295 | } |
296 | 296 | ||
297 | if (strcmp(alg->cra_name, q->cra_name)) | 297 | if (strcmp(alg->cra_name, q->cra_name)) |
298 | continue; | 298 | continue; |
299 | 299 | ||
300 | if (strcmp(alg->cra_driver_name, q->cra_driver_name) && | 300 | if (strcmp(alg->cra_driver_name, q->cra_driver_name) && |
301 | q->cra_priority > alg->cra_priority) | 301 | q->cra_priority > alg->cra_priority) |
302 | continue; | 302 | continue; |
303 | 303 | ||
304 | crypto_remove_spawns(q, &list, alg); | 304 | crypto_remove_spawns(q, &list, alg); |
305 | } | 305 | } |
306 | 306 | ||
307 | complete: | 307 | complete: |
308 | complete_all(&test->completion); | 308 | complete_all(&test->completion); |
309 | 309 | ||
310 | unlock: | 310 | unlock: |
311 | up_write(&crypto_alg_sem); | 311 | up_write(&crypto_alg_sem); |
312 | 312 | ||
313 | crypto_remove_final(&list); | 313 | crypto_remove_final(&list); |
314 | } | 314 | } |
315 | EXPORT_SYMBOL_GPL(crypto_alg_tested); | 315 | EXPORT_SYMBOL_GPL(crypto_alg_tested); |
316 | 316 | ||
317 | void crypto_remove_final(struct list_head *list) | 317 | void crypto_remove_final(struct list_head *list) |
318 | { | 318 | { |
319 | struct crypto_alg *alg; | 319 | struct crypto_alg *alg; |
320 | struct crypto_alg *n; | 320 | struct crypto_alg *n; |
321 | 321 | ||
322 | list_for_each_entry_safe(alg, n, list, cra_list) { | 322 | list_for_each_entry_safe(alg, n, list, cra_list) { |
323 | list_del_init(&alg->cra_list); | 323 | list_del_init(&alg->cra_list); |
324 | crypto_alg_put(alg); | 324 | crypto_alg_put(alg); |
325 | } | 325 | } |
326 | } | 326 | } |
327 | EXPORT_SYMBOL_GPL(crypto_remove_final); | 327 | EXPORT_SYMBOL_GPL(crypto_remove_final); |
328 | 328 | ||
329 | static void crypto_wait_for_test(struct crypto_larval *larval) | 329 | static void crypto_wait_for_test(struct crypto_larval *larval) |
330 | { | 330 | { |
331 | int err; | 331 | int err; |
332 | 332 | ||
333 | err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); | 333 | err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); |
334 | if (err != NOTIFY_STOP) { | 334 | if (err != NOTIFY_STOP) { |
335 | if (WARN_ON(err != NOTIFY_DONE)) | 335 | if (WARN_ON(err != NOTIFY_DONE)) |
336 | goto out; | 336 | goto out; |
337 | crypto_alg_tested(larval->alg.cra_driver_name, 0); | 337 | crypto_alg_tested(larval->alg.cra_driver_name, 0); |
338 | } | 338 | } |
339 | 339 | ||
340 | err = wait_for_completion_interruptible(&larval->completion); | 340 | err = wait_for_completion_interruptible(&larval->completion); |
341 | WARN_ON(err); | 341 | WARN_ON(err); |
342 | 342 | ||
343 | out: | 343 | out: |
344 | crypto_larval_kill(&larval->alg); | 344 | crypto_larval_kill(&larval->alg); |
345 | } | 345 | } |
346 | 346 | ||
/*
 * crypto_register_alg - register a new algorithm with the crypto API
 * @alg: algorithm to register
 *
 * Validates @alg, publishes it under crypto_alg_sem, then waits for its
 * self-test to finish.  Returns 0 on success or a negative errno.
 */
int crypto_register_alg(struct crypto_alg *alg)
{
	struct crypto_larval *larval;
	int err;

	err = crypto_check_alg(alg);
	if (err)
		return err;

	down_write(&crypto_alg_sem);
	larval = __crypto_register_alg(alg);
	up_write(&crypto_alg_sem);

	if (IS_ERR(larval))
		return PTR_ERR(larval);

	/* Blocks until the algorithm has been tested. */
	crypto_wait_for_test(larval);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);
367 | 367 | ||
/*
 * Mark @alg dead and unlink it, queueing anything built on top of it
 * on @list for later destruction by crypto_remove_final().
 * Caller must hold crypto_alg_sem for writing.
 */
static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
{
	if (unlikely(list_empty(&alg->cra_list)))
		return -ENOENT;	/* never registered or already removed */

	alg->cra_flags |= CRYPTO_ALG_DEAD;

	crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
	list_del_init(&alg->cra_list);
	crypto_remove_spawns(alg, list, NULL);

	return 0;
}
381 | 381 | ||
/*
 * crypto_unregister_alg - remove an algorithm from the crypto API
 * @alg: algorithm to remove
 *
 * Unlinks @alg under crypto_alg_sem, then destroys it and anything that
 * depended on it outside the lock.  Returns 0 on success or -ENOENT if
 * @alg was not registered.
 */
int crypto_unregister_alg(struct crypto_alg *alg)
{
	int ret;
	LIST_HEAD(list);

	down_write(&crypto_alg_sem);
	ret = crypto_remove_alg(alg, &list);
	up_write(&crypto_alg_sem);

	if (ret)
		return ret;

	/* The caller's reference must be the last one left. */
	BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
	if (alg->cra_destroy)
		alg->cra_destroy(alg);

	/* Release whatever crypto_remove_alg() queued for destruction. */
	crypto_remove_final(&list);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
402 | 402 | ||
403 | int crypto_register_algs(struct crypto_alg *algs, int count) | 403 | int crypto_register_algs(struct crypto_alg *algs, int count) |
404 | { | 404 | { |
405 | int i, ret; | 405 | int i, ret; |
406 | 406 | ||
407 | for (i = 0; i < count; i++) { | 407 | for (i = 0; i < count; i++) { |
408 | ret = crypto_register_alg(&algs[i]); | 408 | ret = crypto_register_alg(&algs[i]); |
409 | if (ret) | 409 | if (ret) |
410 | goto err; | 410 | goto err; |
411 | } | 411 | } |
412 | 412 | ||
413 | return 0; | 413 | return 0; |
414 | 414 | ||
415 | err: | 415 | err: |
416 | for (--i; i >= 0; --i) | 416 | for (--i; i >= 0; --i) |
417 | crypto_unregister_alg(&algs[i]); | 417 | crypto_unregister_alg(&algs[i]); |
418 | 418 | ||
419 | return ret; | 419 | return ret; |
420 | } | 420 | } |
421 | EXPORT_SYMBOL_GPL(crypto_register_algs); | 421 | EXPORT_SYMBOL_GPL(crypto_register_algs); |
422 | 422 | ||
423 | int crypto_unregister_algs(struct crypto_alg *algs, int count) | 423 | int crypto_unregister_algs(struct crypto_alg *algs, int count) |
424 | { | 424 | { |
425 | int i, ret; | 425 | int i, ret; |
426 | 426 | ||
427 | for (i = 0; i < count; i++) { | 427 | for (i = 0; i < count; i++) { |
428 | ret = crypto_unregister_alg(&algs[i]); | 428 | ret = crypto_unregister_alg(&algs[i]); |
429 | if (ret) | 429 | if (ret) |
430 | pr_err("Failed to unregister %s %s: %d\n", | 430 | pr_err("Failed to unregister %s %s: %d\n", |
431 | algs[i].cra_driver_name, algs[i].cra_name, ret); | 431 | algs[i].cra_driver_name, algs[i].cra_name, ret); |
432 | } | 432 | } |
433 | 433 | ||
434 | return 0; | 434 | return 0; |
435 | } | 435 | } |
436 | EXPORT_SYMBOL_GPL(crypto_unregister_algs); | 436 | EXPORT_SYMBOL_GPL(crypto_unregister_algs); |
437 | 437 | ||
/*
 * crypto_register_template - register a new algorithm template
 * @tmpl: template to register
 *
 * Adds @tmpl to the global template list unless it is already present.
 * Returns 0 on success or -EEXIST if it was registered before.
 */
int crypto_register_template(struct crypto_template *tmpl)
{
	struct crypto_template *q;
	int err = -EEXIST;

	down_write(&crypto_alg_sem);

	crypto_check_module_sig(tmpl->module);

	list_for_each_entry(q, &crypto_template_list, list) {
		if (q == tmpl)
			goto out;	/* already on the list */
	}

	list_add(&tmpl->list, &crypto_template_list);
	crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl);
	err = 0;
out:
	up_write(&crypto_alg_sem);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_register_template);
460 | 460 | ||
/*
 * crypto_unregister_template - remove a template and all its instances
 * @tmpl: template to remove (must be registered)
 *
 * Unlinks the template and every instance created from it while holding
 * crypto_alg_sem, then frees the instances and drops remaining users
 * outside the lock.
 */
void crypto_unregister_template(struct crypto_template *tmpl)
{
	struct crypto_instance *inst;
	struct hlist_node *n;
	struct hlist_head *list;
	LIST_HEAD(users);

	down_write(&crypto_alg_sem);

	BUG_ON(list_empty(&tmpl->list));
	list_del_init(&tmpl->list);

	/* Phase 1: unlink every instance while the lock is held. */
	list = &tmpl->instances;
	hlist_for_each_entry(inst, list, list) {
		int err = crypto_remove_alg(&inst->alg, &users);
		BUG_ON(err);
	}

	crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl);

	up_write(&crypto_alg_sem);

	/* Phase 2: free the now-unreachable instances outside the lock. */
	hlist_for_each_entry_safe(inst, n, list, list) {
		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
		tmpl->free(inst);
	}
	crypto_remove_final(&users);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
490 | 490 | ||
491 | static struct crypto_template *__crypto_lookup_template(const char *name) | 491 | static struct crypto_template *__crypto_lookup_template(const char *name) |
492 | { | 492 | { |
493 | struct crypto_template *q, *tmpl = NULL; | 493 | struct crypto_template *q, *tmpl = NULL; |
494 | 494 | ||
495 | down_read(&crypto_alg_sem); | 495 | down_read(&crypto_alg_sem); |
496 | list_for_each_entry(q, &crypto_template_list, list) { | 496 | list_for_each_entry(q, &crypto_template_list, list) { |
497 | if (strcmp(q->name, name)) | 497 | if (strcmp(q->name, name)) |
498 | continue; | 498 | continue; |
499 | if (unlikely(!crypto_tmpl_get(q))) | 499 | if (unlikely(!crypto_tmpl_get(q))) |
500 | continue; | 500 | continue; |
501 | 501 | ||
502 | tmpl = q; | 502 | tmpl = q; |
503 | break; | 503 | break; |
504 | } | 504 | } |
505 | up_read(&crypto_alg_sem); | 505 | up_read(&crypto_alg_sem); |
506 | 506 | ||
507 | return tmpl; | 507 | return tmpl; |
508 | } | 508 | } |
509 | 509 | ||
/*
 * crypto_lookup_template - find a template, loading its module if needed
 * @name: template name, e.g. "hmac"
 *
 * Tries a plain lookup first; on failure requests the "crypto-%s"
 * module alias and retries.  The prefix confines module autoloading to
 * real crypto modules so arbitrary module names cannot be probed.
 */
struct crypto_template *crypto_lookup_template(const char *name)
{
	return try_then_request_module(__crypto_lookup_template(name),
				       "crypto-%s", name);
}
EXPORT_SYMBOL_GPL(crypto_lookup_template);
516 | 516 | ||
/*
 * crypto_register_instance - register a template-created instance
 * @tmpl: template the instance was created from
 * @inst: instance to register
 *
 * Registers @inst like a normal algorithm and links it into @tmpl's
 * instance list so that unregistering the template tears it down too.
 * Returns 0 on success or a negative errno.
 */
int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst)
{
	struct crypto_larval *larval;
	int err;

	err = crypto_check_alg(&inst->alg);
	if (err)
		goto err;

	inst->alg.cra_module = tmpl->module;
	inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;

	down_write(&crypto_alg_sem);

	larval = __crypto_register_alg(&inst->alg);
	if (IS_ERR(larval))
		goto unlock;

	/* Tie the instance to its template before dropping the lock. */
	hlist_add_head(&inst->list, &tmpl->instances);
	inst->tmpl = tmpl;

unlock:
	up_write(&crypto_alg_sem);

	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto err;

	/* Blocks until the instance has passed its self-test. */
	crypto_wait_for_test(larval);
	err = 0;

err:
	return err;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
553 | 553 | ||
/*
 * crypto_unregister_instance - remove a single template instance
 * @alg: the instance's algorithm (must carry CRYPTO_ALG_INSTANCE)
 *
 * Unlinks the instance from its template and the algorithm lists, then
 * frees it through the template's ->free() hook.  Returns 0 on success
 * or a negative errno.
 */
int crypto_unregister_instance(struct crypto_alg *alg)
{
	int err;
	struct crypto_instance *inst = (void *)alg;
	struct crypto_template *tmpl = inst->tmpl;
	LIST_HEAD(users);

	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
		return -EINVAL;

	/* The caller's reference must be the last one left. */
	BUG_ON(atomic_read(&alg->cra_refcnt) != 1);

	down_write(&crypto_alg_sem);

	hlist_del_init(&inst->list);
	err = crypto_remove_alg(alg, &users);

	up_write(&crypto_alg_sem);

	if (err)
		return err;

	tmpl->free(inst);
	crypto_remove_final(&users);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_instance);
582 | 582 | ||
583 | int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, | 583 | int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, |
584 | struct crypto_instance *inst, u32 mask) | 584 | struct crypto_instance *inst, u32 mask) |
585 | { | 585 | { |
586 | int err = -EAGAIN; | 586 | int err = -EAGAIN; |
587 | 587 | ||
588 | spawn->inst = inst; | 588 | spawn->inst = inst; |
589 | spawn->mask = mask; | 589 | spawn->mask = mask; |
590 | 590 | ||
591 | down_write(&crypto_alg_sem); | 591 | down_write(&crypto_alg_sem); |
592 | if (!crypto_is_moribund(alg)) { | 592 | if (!crypto_is_moribund(alg)) { |
593 | list_add(&spawn->list, &alg->cra_users); | 593 | list_add(&spawn->list, &alg->cra_users); |
594 | spawn->alg = alg; | 594 | spawn->alg = alg; |
595 | err = 0; | 595 | err = 0; |
596 | } | 596 | } |
597 | up_write(&crypto_alg_sem); | 597 | up_write(&crypto_alg_sem); |
598 | 598 | ||
599 | return err; | 599 | return err; |
600 | } | 600 | } |
601 | EXPORT_SYMBOL_GPL(crypto_init_spawn); | 601 | EXPORT_SYMBOL_GPL(crypto_init_spawn); |
602 | 602 | ||
603 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, | 603 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, |
604 | struct crypto_instance *inst, | 604 | struct crypto_instance *inst, |
605 | const struct crypto_type *frontend) | 605 | const struct crypto_type *frontend) |
606 | { | 606 | { |
607 | int err = -EINVAL; | 607 | int err = -EINVAL; |
608 | 608 | ||
609 | if ((alg->cra_flags ^ frontend->type) & frontend->maskset) | 609 | if ((alg->cra_flags ^ frontend->type) & frontend->maskset) |
610 | goto out; | 610 | goto out; |
611 | 611 | ||
612 | spawn->frontend = frontend; | 612 | spawn->frontend = frontend; |
613 | err = crypto_init_spawn(spawn, alg, inst, frontend->maskset); | 613 | err = crypto_init_spawn(spawn, alg, inst, frontend->maskset); |
614 | 614 | ||
615 | out: | 615 | out: |
616 | return err; | 616 | return err; |
617 | } | 617 | } |
618 | EXPORT_SYMBOL_GPL(crypto_init_spawn2); | 618 | EXPORT_SYMBOL_GPL(crypto_init_spawn2); |
619 | 619 | ||
620 | void crypto_drop_spawn(struct crypto_spawn *spawn) | 620 | void crypto_drop_spawn(struct crypto_spawn *spawn) |
621 | { | 621 | { |
622 | if (!spawn->alg) | 622 | if (!spawn->alg) |
623 | return; | 623 | return; |
624 | 624 | ||
625 | down_write(&crypto_alg_sem); | 625 | down_write(&crypto_alg_sem); |
626 | list_del(&spawn->list); | 626 | list_del(&spawn->list); |
627 | up_write(&crypto_alg_sem); | 627 | up_write(&crypto_alg_sem); |
628 | } | 628 | } |
629 | EXPORT_SYMBOL_GPL(crypto_drop_spawn); | 629 | EXPORT_SYMBOL_GPL(crypto_drop_spawn); |
630 | 630 | ||
/*
 * Fetch the algorithm behind @spawn, taking a reference on it.  The
 * spawn's alg pointer can be torn down concurrently, so it is read and
 * pinned under crypto_alg_sem.  Returns the algorithm, or
 * ERR_PTR(-EAGAIN) when it is gone or could not be pinned (in which
 * case crypto_shoot_alg() is called on it).
 */
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
	struct crypto_alg *alg;
	struct crypto_alg *alg2;

	down_read(&crypto_alg_sem);
	alg = spawn->alg;
	alg2 = alg;
	if (alg2)
		alg2 = crypto_mod_get(alg2);	/* may fail while dying */
	up_read(&crypto_alg_sem);

	if (!alg2) {
		if (alg)
			crypto_shoot_alg(alg);
		return ERR_PTR(-EAGAIN);
	}

	return alg;
}
651 | 651 | ||
652 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | 652 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, |
653 | u32 mask) | 653 | u32 mask) |
654 | { | 654 | { |
655 | struct crypto_alg *alg; | 655 | struct crypto_alg *alg; |
656 | struct crypto_tfm *tfm; | 656 | struct crypto_tfm *tfm; |
657 | 657 | ||
658 | alg = crypto_spawn_alg(spawn); | 658 | alg = crypto_spawn_alg(spawn); |
659 | if (IS_ERR(alg)) | 659 | if (IS_ERR(alg)) |
660 | return ERR_CAST(alg); | 660 | return ERR_CAST(alg); |
661 | 661 | ||
662 | tfm = ERR_PTR(-EINVAL); | 662 | tfm = ERR_PTR(-EINVAL); |
663 | if (unlikely((alg->cra_flags ^ type) & mask)) | 663 | if (unlikely((alg->cra_flags ^ type) & mask)) |
664 | goto out_put_alg; | 664 | goto out_put_alg; |
665 | 665 | ||
666 | tfm = __crypto_alloc_tfm(alg, type, mask); | 666 | tfm = __crypto_alloc_tfm(alg, type, mask); |
667 | if (IS_ERR(tfm)) | 667 | if (IS_ERR(tfm)) |
668 | goto out_put_alg; | 668 | goto out_put_alg; |
669 | 669 | ||
670 | return tfm; | 670 | return tfm; |
671 | 671 | ||
672 | out_put_alg: | 672 | out_put_alg: |
673 | crypto_mod_put(alg); | 673 | crypto_mod_put(alg); |
674 | return tfm; | 674 | return tfm; |
675 | } | 675 | } |
676 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm); | 676 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm); |
677 | 677 | ||
678 | void *crypto_spawn_tfm2(struct crypto_spawn *spawn) | 678 | void *crypto_spawn_tfm2(struct crypto_spawn *spawn) |
679 | { | 679 | { |
680 | struct crypto_alg *alg; | 680 | struct crypto_alg *alg; |
681 | struct crypto_tfm *tfm; | 681 | struct crypto_tfm *tfm; |
682 | 682 | ||
683 | alg = crypto_spawn_alg(spawn); | 683 | alg = crypto_spawn_alg(spawn); |
684 | if (IS_ERR(alg)) | 684 | if (IS_ERR(alg)) |
685 | return ERR_CAST(alg); | 685 | return ERR_CAST(alg); |
686 | 686 | ||
687 | tfm = crypto_create_tfm(alg, spawn->frontend); | 687 | tfm = crypto_create_tfm(alg, spawn->frontend); |
688 | if (IS_ERR(tfm)) | 688 | if (IS_ERR(tfm)) |
689 | goto out_put_alg; | 689 | goto out_put_alg; |
690 | 690 | ||
691 | return tfm; | 691 | return tfm; |
692 | 692 | ||
693 | out_put_alg: | 693 | out_put_alg: |
694 | crypto_mod_put(alg); | 694 | crypto_mod_put(alg); |
695 | return tfm; | 695 | return tfm; |
696 | } | 696 | } |
697 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); | 697 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); |
698 | 698 | ||
699 | int crypto_register_notifier(struct notifier_block *nb) | 699 | int crypto_register_notifier(struct notifier_block *nb) |
700 | { | 700 | { |
701 | return blocking_notifier_chain_register(&crypto_chain, nb); | 701 | return blocking_notifier_chain_register(&crypto_chain, nb); |
702 | } | 702 | } |
703 | EXPORT_SYMBOL_GPL(crypto_register_notifier); | 703 | EXPORT_SYMBOL_GPL(crypto_register_notifier); |
704 | 704 | ||
705 | int crypto_unregister_notifier(struct notifier_block *nb) | 705 | int crypto_unregister_notifier(struct notifier_block *nb) |
706 | { | 706 | { |
707 | return blocking_notifier_chain_unregister(&crypto_chain, nb); | 707 | return blocking_notifier_chain_unregister(&crypto_chain, nb); |
708 | } | 708 | } |
709 | EXPORT_SYMBOL_GPL(crypto_unregister_notifier); | 709 | EXPORT_SYMBOL_GPL(crypto_unregister_notifier); |
710 | 710 | ||
711 | struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) | 711 | struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) |
712 | { | 712 | { |
713 | struct rtattr *rta = tb[0]; | 713 | struct rtattr *rta = tb[0]; |
714 | struct crypto_attr_type *algt; | 714 | struct crypto_attr_type *algt; |
715 | 715 | ||
716 | if (!rta) | 716 | if (!rta) |
717 | return ERR_PTR(-ENOENT); | 717 | return ERR_PTR(-ENOENT); |
718 | if (RTA_PAYLOAD(rta) < sizeof(*algt)) | 718 | if (RTA_PAYLOAD(rta) < sizeof(*algt)) |
719 | return ERR_PTR(-EINVAL); | 719 | return ERR_PTR(-EINVAL); |
720 | if (rta->rta_type != CRYPTOA_TYPE) | 720 | if (rta->rta_type != CRYPTOA_TYPE) |
721 | return ERR_PTR(-EINVAL); | 721 | return ERR_PTR(-EINVAL); |
722 | 722 | ||
723 | algt = RTA_DATA(rta); | 723 | algt = RTA_DATA(rta); |
724 | 724 | ||
725 | return algt; | 725 | return algt; |
726 | } | 726 | } |
727 | EXPORT_SYMBOL_GPL(crypto_get_attr_type); | 727 | EXPORT_SYMBOL_GPL(crypto_get_attr_type); |
728 | 728 | ||
729 | int crypto_check_attr_type(struct rtattr **tb, u32 type) | 729 | int crypto_check_attr_type(struct rtattr **tb, u32 type) |
730 | { | 730 | { |
731 | struct crypto_attr_type *algt; | 731 | struct crypto_attr_type *algt; |
732 | 732 | ||
733 | algt = crypto_get_attr_type(tb); | 733 | algt = crypto_get_attr_type(tb); |
734 | if (IS_ERR(algt)) | 734 | if (IS_ERR(algt)) |
735 | return PTR_ERR(algt); | 735 | return PTR_ERR(algt); |
736 | 736 | ||
737 | if ((algt->type ^ type) & algt->mask) | 737 | if ((algt->type ^ type) & algt->mask) |
738 | return -EINVAL; | 738 | return -EINVAL; |
739 | 739 | ||
740 | return 0; | 740 | return 0; |
741 | } | 741 | } |
742 | EXPORT_SYMBOL_GPL(crypto_check_attr_type); | 742 | EXPORT_SYMBOL_GPL(crypto_check_attr_type); |
743 | 743 | ||
744 | const char *crypto_attr_alg_name(struct rtattr *rta) | 744 | const char *crypto_attr_alg_name(struct rtattr *rta) |
745 | { | 745 | { |
746 | struct crypto_attr_alg *alga; | 746 | struct crypto_attr_alg *alga; |
747 | 747 | ||
748 | if (!rta) | 748 | if (!rta) |
749 | return ERR_PTR(-ENOENT); | 749 | return ERR_PTR(-ENOENT); |
750 | if (RTA_PAYLOAD(rta) < sizeof(*alga)) | 750 | if (RTA_PAYLOAD(rta) < sizeof(*alga)) |
751 | return ERR_PTR(-EINVAL); | 751 | return ERR_PTR(-EINVAL); |
752 | if (rta->rta_type != CRYPTOA_ALG) | 752 | if (rta->rta_type != CRYPTOA_ALG) |
753 | return ERR_PTR(-EINVAL); | 753 | return ERR_PTR(-EINVAL); |
754 | 754 | ||
755 | alga = RTA_DATA(rta); | 755 | alga = RTA_DATA(rta); |
756 | alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0; | 756 | alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0; |
757 | 757 | ||
758 | return alga->name; | 758 | return alga->name; |
759 | } | 759 | } |
760 | EXPORT_SYMBOL_GPL(crypto_attr_alg_name); | 760 | EXPORT_SYMBOL_GPL(crypto_attr_alg_name); |
761 | 761 | ||
762 | struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, | 762 | struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, |
763 | const struct crypto_type *frontend, | 763 | const struct crypto_type *frontend, |
764 | u32 type, u32 mask) | 764 | u32 type, u32 mask) |
765 | { | 765 | { |
766 | const char *name; | 766 | const char *name; |
767 | 767 | ||
768 | name = crypto_attr_alg_name(rta); | 768 | name = crypto_attr_alg_name(rta); |
769 | if (IS_ERR(name)) | 769 | if (IS_ERR(name)) |
770 | return ERR_CAST(name); | 770 | return ERR_CAST(name); |
771 | 771 | ||
772 | return crypto_find_alg(name, frontend, type, mask); | 772 | return crypto_find_alg(name, frontend, type, mask); |
773 | } | 773 | } |
774 | EXPORT_SYMBOL_GPL(crypto_attr_alg2); | 774 | EXPORT_SYMBOL_GPL(crypto_attr_alg2); |
775 | 775 | ||
776 | int crypto_attr_u32(struct rtattr *rta, u32 *num) | 776 | int crypto_attr_u32(struct rtattr *rta, u32 *num) |
777 | { | 777 | { |
778 | struct crypto_attr_u32 *nu32; | 778 | struct crypto_attr_u32 *nu32; |
779 | 779 | ||
780 | if (!rta) | 780 | if (!rta) |
781 | return -ENOENT; | 781 | return -ENOENT; |
782 | if (RTA_PAYLOAD(rta) < sizeof(*nu32)) | 782 | if (RTA_PAYLOAD(rta) < sizeof(*nu32)) |
783 | return -EINVAL; | 783 | return -EINVAL; |
784 | if (rta->rta_type != CRYPTOA_U32) | 784 | if (rta->rta_type != CRYPTOA_U32) |
785 | return -EINVAL; | 785 | return -EINVAL; |
786 | 786 | ||
787 | nu32 = RTA_DATA(rta); | 787 | nu32 = RTA_DATA(rta); |
788 | *num = nu32->num; | 788 | *num = nu32->num; |
789 | 789 | ||
790 | return 0; | 790 | return 0; |
791 | } | 791 | } |
792 | EXPORT_SYMBOL_GPL(crypto_attr_u32); | 792 | EXPORT_SYMBOL_GPL(crypto_attr_u32); |
793 | 793 | ||
794 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, | 794 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, |
795 | unsigned int head) | 795 | unsigned int head) |
796 | { | 796 | { |
797 | struct crypto_instance *inst; | 797 | struct crypto_instance *inst; |
798 | char *p; | 798 | char *p; |
799 | int err; | 799 | int err; |
800 | 800 | ||
801 | p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn), | 801 | p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn), |
802 | GFP_KERNEL); | 802 | GFP_KERNEL); |
803 | if (!p) | 803 | if (!p) |
804 | return ERR_PTR(-ENOMEM); | 804 | return ERR_PTR(-ENOMEM); |
805 | 805 | ||
806 | inst = (void *)(p + head); | 806 | inst = (void *)(p + head); |
807 | 807 | ||
808 | err = -ENAMETOOLONG; | 808 | err = -ENAMETOOLONG; |
809 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, | 809 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, |
810 | alg->cra_name) >= CRYPTO_MAX_ALG_NAME) | 810 | alg->cra_name) >= CRYPTO_MAX_ALG_NAME) |
811 | goto err_free_inst; | 811 | goto err_free_inst; |
812 | 812 | ||
813 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", | 813 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", |
814 | name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 814 | name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
815 | goto err_free_inst; | 815 | goto err_free_inst; |
816 | 816 | ||
817 | return p; | 817 | return p; |
818 | 818 | ||
819 | err_free_inst: | 819 | err_free_inst: |
820 | kfree(p); | 820 | kfree(p); |
821 | return ERR_PTR(err); | 821 | return ERR_PTR(err); |
822 | } | 822 | } |
823 | EXPORT_SYMBOL_GPL(crypto_alloc_instance2); | 823 | EXPORT_SYMBOL_GPL(crypto_alloc_instance2); |
824 | 824 | ||
825 | struct crypto_instance *crypto_alloc_instance(const char *name, | 825 | struct crypto_instance *crypto_alloc_instance(const char *name, |
826 | struct crypto_alg *alg) | 826 | struct crypto_alg *alg) |
827 | { | 827 | { |
828 | struct crypto_instance *inst; | 828 | struct crypto_instance *inst; |
829 | struct crypto_spawn *spawn; | 829 | struct crypto_spawn *spawn; |
830 | int err; | 830 | int err; |
831 | 831 | ||
832 | inst = crypto_alloc_instance2(name, alg, 0); | 832 | inst = crypto_alloc_instance2(name, alg, 0); |
833 | if (IS_ERR(inst)) | 833 | if (IS_ERR(inst)) |
834 | goto out; | 834 | goto out; |
835 | 835 | ||
836 | spawn = crypto_instance_ctx(inst); | 836 | spawn = crypto_instance_ctx(inst); |
837 | err = crypto_init_spawn(spawn, alg, inst, | 837 | err = crypto_init_spawn(spawn, alg, inst, |
838 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | 838 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); |
839 | 839 | ||
840 | if (err) | 840 | if (err) |
841 | goto err_free_inst; | 841 | goto err_free_inst; |
842 | 842 | ||
843 | return inst; | 843 | return inst; |
844 | 844 | ||
845 | err_free_inst: | 845 | err_free_inst: |
846 | kfree(inst); | 846 | kfree(inst); |
847 | inst = ERR_PTR(err); | 847 | inst = ERR_PTR(err); |
848 | 848 | ||
849 | out: | 849 | out: |
850 | return inst; | 850 | return inst; |
851 | } | 851 | } |
852 | EXPORT_SYMBOL_GPL(crypto_alloc_instance); | 852 | EXPORT_SYMBOL_GPL(crypto_alloc_instance); |
853 | 853 | ||
854 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) | 854 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) |
855 | { | 855 | { |
856 | INIT_LIST_HEAD(&queue->list); | 856 | INIT_LIST_HEAD(&queue->list); |
857 | queue->backlog = &queue->list; | 857 | queue->backlog = &queue->list; |
858 | queue->qlen = 0; | 858 | queue->qlen = 0; |
859 | queue->max_qlen = max_qlen; | 859 | queue->max_qlen = max_qlen; |
860 | } | 860 | } |
861 | EXPORT_SYMBOL_GPL(crypto_init_queue); | 861 | EXPORT_SYMBOL_GPL(crypto_init_queue); |
862 | 862 | ||
863 | int crypto_enqueue_request(struct crypto_queue *queue, | 863 | int crypto_enqueue_request(struct crypto_queue *queue, |
864 | struct crypto_async_request *request) | 864 | struct crypto_async_request *request) |
865 | { | 865 | { |
866 | int err = -EINPROGRESS; | 866 | int err = -EINPROGRESS; |
867 | 867 | ||
868 | if (unlikely(queue->qlen >= queue->max_qlen)) { | 868 | if (unlikely(queue->qlen >= queue->max_qlen)) { |
869 | err = -EBUSY; | 869 | err = -EBUSY; |
870 | if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 870 | if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
871 | goto out; | 871 | goto out; |
872 | if (queue->backlog == &queue->list) | 872 | if (queue->backlog == &queue->list) |
873 | queue->backlog = &request->list; | 873 | queue->backlog = &request->list; |
874 | } | 874 | } |
875 | 875 | ||
876 | queue->qlen++; | 876 | queue->qlen++; |
877 | list_add_tail(&request->list, &queue->list); | 877 | list_add_tail(&request->list, &queue->list); |
878 | 878 | ||
879 | out: | 879 | out: |
880 | return err; | 880 | return err; |
881 | } | 881 | } |
882 | EXPORT_SYMBOL_GPL(crypto_enqueue_request); | 882 | EXPORT_SYMBOL_GPL(crypto_enqueue_request); |
883 | 883 | ||
/*
 * Pop the oldest request from @queue.  @offset is subtracted from the
 * address of the embedded crypto_async_request so callers can recover a
 * containing structure.  Returns NULL when the queue is empty.
 */
void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
{
	struct list_head *request;

	if (unlikely(!queue->qlen))
		return NULL;

	queue->qlen--;

	/* Removing an entry shifts the start of the backlogged region. */
	if (queue->backlog != &queue->list)
		queue->backlog = queue->backlog->next;

	request = queue->list.next;
	list_del(request);

	return (char *)list_entry(request, struct crypto_async_request, list) -
	       offset;
}
EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
903 | 903 | ||
/* Pop the oldest request; zero-offset form of __crypto_dequeue_request(). */
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
{
	return __crypto_dequeue_request(queue, 0);
}
EXPORT_SYMBOL_GPL(crypto_dequeue_request);
909 | 909 | ||
910 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) | 910 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) |
911 | { | 911 | { |
912 | struct crypto_async_request *req; | 912 | struct crypto_async_request *req; |
913 | 913 | ||
914 | list_for_each_entry(req, &queue->list, list) { | 914 | list_for_each_entry(req, &queue->list, list) { |
915 | if (req->tfm == tfm) | 915 | if (req->tfm == tfm) |
916 | return 1; | 916 | return 1; |
917 | } | 917 | } |
918 | 918 | ||
919 | return 0; | 919 | return 0; |
920 | } | 920 | } |
921 | EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); | 921 | EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); |
922 | 922 | ||
923 | static inline void crypto_inc_byte(u8 *a, unsigned int size) | 923 | static inline void crypto_inc_byte(u8 *a, unsigned int size) |
924 | { | 924 | { |
925 | u8 *b = (a + size); | 925 | u8 *b = (a + size); |
926 | u8 c; | 926 | u8 c; |
927 | 927 | ||
928 | for (; size; size--) { | 928 | for (; size; size--) { |
929 | c = *--b + 1; | 929 | c = *--b + 1; |
930 | *b = c; | 930 | *b = c; |
931 | if (c) | 931 | if (c) |
932 | break; | 932 | break; |
933 | } | 933 | } |
934 | } | 934 | } |
935 | 935 | ||
936 | void crypto_inc(u8 *a, unsigned int size) | 936 | void crypto_inc(u8 *a, unsigned int size) |
937 | { | 937 | { |
938 | __be32 *b = (__be32 *)(a + size); | 938 | __be32 *b = (__be32 *)(a + size); |
939 | u32 c; | 939 | u32 c; |
940 | 940 | ||
941 | for (; size >= 4; size -= 4) { | 941 | for (; size >= 4; size -= 4) { |
942 | c = be32_to_cpu(*--b) + 1; | 942 | c = be32_to_cpu(*--b) + 1; |
943 | *b = cpu_to_be32(c); | 943 | *b = cpu_to_be32(c); |
944 | if (c) | 944 | if (c) |
945 | return; | 945 | return; |
946 | } | 946 | } |
947 | 947 | ||
948 | crypto_inc_byte(a, size); | 948 | crypto_inc_byte(a, size); |
949 | } | 949 | } |
950 | EXPORT_SYMBOL_GPL(crypto_inc); | 950 | EXPORT_SYMBOL_GPL(crypto_inc); |
951 | 951 | ||
952 | static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size) | 952 | static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size) |
953 | { | 953 | { |
954 | for (; size; size--) | 954 | for (; size; size--) |
955 | *a++ ^= *b++; | 955 | *a++ ^= *b++; |
956 | } | 956 | } |
957 | 957 | ||
958 | void crypto_xor(u8 *dst, const u8 *src, unsigned int size) | 958 | void crypto_xor(u8 *dst, const u8 *src, unsigned int size) |
959 | { | 959 | { |
960 | u32 *a = (u32 *)dst; | 960 | u32 *a = (u32 *)dst; |
961 | u32 *b = (u32 *)src; | 961 | u32 *b = (u32 *)src; |
962 | 962 | ||
963 | for (; size >= 4; size -= 4) | 963 | for (; size >= 4; size -= 4) |
964 | *a++ ^= *b++; | 964 | *a++ ^= *b++; |
965 | 965 | ||
966 | crypto_xor_byte((u8 *)a, (u8 *)b, size); | 966 | crypto_xor_byte((u8 *)a, (u8 *)b, size); |
967 | } | 967 | } |
968 | EXPORT_SYMBOL_GPL(crypto_xor); | 968 | EXPORT_SYMBOL_GPL(crypto_xor); |
969 | 969 | ||
/* Module init: bring up the crypto /proc interface. */
static int __init crypto_algapi_init(void)
{
	crypto_init_proc();
	return 0;
}

/* Module exit: tear the /proc interface back down. */
static void __exit crypto_algapi_exit(void)
{
	crypto_exit_proc();
}

module_init(crypto_algapi_init);
module_exit(crypto_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cryptographic algorithms API");
986 | 986 |
crypto/authenc.c
1 | /* | 1 | /* |
2 | * Authenc: Simple AEAD wrapper for IPsec | 2 | * Authenc: Simple AEAD wrapper for IPsec |
3 | * | 3 | * |
4 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <crypto/aead.h> | 13 | #include <crypto/aead.h> |
14 | #include <crypto/internal/hash.h> | 14 | #include <crypto/internal/hash.h> |
15 | #include <crypto/internal/skcipher.h> | 15 | #include <crypto/internal/skcipher.h> |
16 | #include <crypto/authenc.h> | 16 | #include <crypto/authenc.h> |
17 | #include <crypto/scatterwalk.h> | 17 | #include <crypto/scatterwalk.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/rtnetlink.h> | 22 | #include <linux/rtnetlink.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | 25 | ||
/*
 * Hashing strategy hook: either the single-digest fast path or the
 * init/update/finup fallback.  Returns a pointer to the digest inside
 * the request context, or an ERR_PTR.
 */
typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);

/* Per-template-instance spawns for the two child algorithms. */
struct authenc_instance_ctx {
	struct crypto_ahash_spawn auth;		/* inner hash (MAC) */
	struct crypto_skcipher_spawn enc;	/* inner cipher */
};

/* Per-tfm context: instantiated children plus request layout info. */
struct crypto_authenc_ctx {
	unsigned int reqoff;	/* offset of the child request within tail[] */
	struct crypto_ahash *auth;
	struct crypto_ablkcipher *enc;
};

/* Per-request scratch state, carried in the aead request context. */
struct authenc_request_ctx {
	unsigned int cryptlen;		/* total byte count fed to the hash */
	struct scatterlist *sg;		/* head of the chained sg to hash */
	struct scatterlist asg[2];	/* chain link for the assoc data */
	struct scatterlist cipher[2];	/* chain link for iv + payload */
	crypto_completion_t complete;		/* final-hash completion */
	crypto_completion_t update_complete;	/* assoc-update completion */
	char tail[];	/* variable area: aligned digest + child request */
};
48 | 48 | ||
49 | static void authenc_request_complete(struct aead_request *req, int err) | 49 | static void authenc_request_complete(struct aead_request *req, int err) |
50 | { | 50 | { |
51 | if (err != -EINPROGRESS) | 51 | if (err != -EINPROGRESS) |
52 | aead_request_complete(req, err); | 52 | aead_request_complete(req, err); |
53 | } | 53 | } |
54 | 54 | ||
55 | int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, | 55 | int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, |
56 | unsigned int keylen) | 56 | unsigned int keylen) |
57 | { | 57 | { |
58 | struct rtattr *rta = (struct rtattr *)key; | 58 | struct rtattr *rta = (struct rtattr *)key; |
59 | struct crypto_authenc_key_param *param; | 59 | struct crypto_authenc_key_param *param; |
60 | 60 | ||
61 | if (!RTA_OK(rta, keylen)) | 61 | if (!RTA_OK(rta, keylen)) |
62 | return -EINVAL; | 62 | return -EINVAL; |
63 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 63 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) |
64 | return -EINVAL; | 64 | return -EINVAL; |
65 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 65 | if (RTA_PAYLOAD(rta) < sizeof(*param)) |
66 | return -EINVAL; | 66 | return -EINVAL; |
67 | 67 | ||
68 | param = RTA_DATA(rta); | 68 | param = RTA_DATA(rta); |
69 | keys->enckeylen = be32_to_cpu(param->enckeylen); | 69 | keys->enckeylen = be32_to_cpu(param->enckeylen); |
70 | 70 | ||
71 | key += RTA_ALIGN(rta->rta_len); | 71 | key += RTA_ALIGN(rta->rta_len); |
72 | keylen -= RTA_ALIGN(rta->rta_len); | 72 | keylen -= RTA_ALIGN(rta->rta_len); |
73 | 73 | ||
74 | if (keylen < keys->enckeylen) | 74 | if (keylen < keys->enckeylen) |
75 | return -EINVAL; | 75 | return -EINVAL; |
76 | 76 | ||
77 | keys->authkeylen = keylen - keys->enckeylen; | 77 | keys->authkeylen = keylen - keys->enckeylen; |
78 | keys->authkey = key; | 78 | keys->authkey = key; |
79 | keys->enckey = key + keys->authkeylen; | 79 | keys->enckey = key + keys->authkeylen; |
80 | 80 | ||
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
83 | EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys); | 83 | EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys); |
84 | 84 | ||
/*
 * Set the combined key on an authenc tfm: split the rtattr-encoded
 * blob (see crypto_authenc_extractkeys()) and hand the auth key to the
 * inner ahash and the cipher key to the inner ablkcipher.  Request
 * flags are propagated down to each child and the child's result flags
 * are reflected back onto the aead.
 *
 * Returns 0 or a negative errno; a malformed blob additionally sets
 * CRYPTO_TFM_RES_BAD_KEY_LEN on @authenc.
 */
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_ahash *auth = ctx->auth;
	struct crypto_ablkcipher *enc = ctx->enc;
	struct crypto_authenc_keys keys;
	int err = -EINVAL;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Forward caller request flags to the hash, key it, and mirror
	 * its result flags back up. */
	crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
	crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
				       CRYPTO_TFM_RES_MASK);

	if (err)
		goto out;

	/* Same flag round-trip for the inner cipher. */
	crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
	crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
				       CRYPTO_TFM_RES_MASK);

out:
	return err;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}
121 | 121 | ||
/*
 * Async completion for the assoc-data hash update on the ICV
 * generation (encrypt) path: finish hashing the chained data with
 * finup, then append the resulting digest (the ICV) after it.
 */
static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
					    int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	/* Child hash request lives at a fixed offset inside tail[]. */
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);

	if (err)
		goto out;

	/* Hash the sg list that crypto_authenc_genicv() chained up. */
	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	/* Write the ICV directly after the hashed data. */
	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
				 areq_ctx->cryptlen,
				 crypto_aead_authsize(authenc), 1);

out:
	authenc_request_complete(req, err);
}
151 | 151 | ||
/*
 * Async completion for the single-shot digest on the ICV generation
 * path: the hash is done, so just append it after the hashed data and
 * complete the aead request.
 */
static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);

	if (err)
		goto out;

	/* Place the ICV right after the cryptlen bytes that were hashed. */
	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
				 areq_ctx->cryptlen,
				 crypto_aead_authsize(authenc), 1);

out:
	aead_request_complete(req, err);
}
170 | 170 | ||
/*
 * Async completion for the assoc-data hash update on the decrypt
 * (verify) path: finish the hash with finup, compare the computed ICV
 * against the transmitted one in constant time, and only then kick off
 * the actual decryption.
 */
static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
					     int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	/* The transmitted ICV trails the ciphertext; pull it out next to
	 * the computed digest for comparison. */
	authsize = crypto_aead_authsize(authenc);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	/* Constant-time compare to avoid a timing oracle. */
	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	/* ICV verified: decrypt the payload (excluding the ICV bytes). */
	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_request_complete(req, err);
}
219 | 219 | ||
/*
 * Async completion for the single-shot digest on the decrypt (verify)
 * path: compare the computed ICV with the transmitted one in constant
 * time and, if they match, start the decryption.
 */
static void authenc_verify_ahash_done(struct crypto_async_request *areq,
				      int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	/* Copy the transmitted ICV out of the sg list next to the
	 * freshly computed digest. */
	authsize = crypto_aead_authsize(authenc);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	/* Constant-time compare to avoid a timing oracle. */
	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_request_complete(req, err);
}
258 | 258 | ||
/*
 * Fallback hashing path, used when the associated data cannot simply
 * be chained onto the data sg list: run init, update(assoc), then
 * finup over the chained data.
 *
 * Returns a pointer to the digest inside the request tail (aligned for
 * the hash's alignmask) or an ERR_PTR on failure.  Note the async
 * completion callbacks may re-enter via areq_ctx->update_complete /
 * ->complete.
 */
static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_ahash *auth = ctx->auth;
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	u8 *hash = areq_ctx->tail;
	int err;

	/* Round the digest buffer up to the hash's alignment requirement. */
	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
			   crypto_ahash_alignmask(auth) + 1);

	ahash_request_set_tfm(ahreq, auth);

	err = crypto_ahash_init(ahreq);
	if (err)
		return ERR_PTR(err);

	/* First absorb the associated data ... */
	ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->update_complete, req);

	err = crypto_ahash_update(ahreq);
	if (err)
		return ERR_PTR(err);

	/* ... then finish over the chained iv/payload sg list. */
	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		return ERR_PTR(err);

	return hash;
}
297 | 297 | ||
/*
 * Fast hashing path: the associated data has already been chained into
 * areq_ctx->sg, so a single digest call covers everything.
 *
 * Returns a pointer to the digest inside the request tail (aligned for
 * the hash's alignmask) or an ERR_PTR on failure.
 */
static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_ahash *auth = ctx->auth;
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	u8 *hash = areq_ctx->tail;
	int err;

	/* Round the digest buffer up to the hash's alignment requirement. */
	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
			   crypto_ahash_alignmask(auth) + 1);

	ahash_request_set_tfm(ahreq, auth);
	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->complete, req);

	err = crypto_ahash_digest(ahreq);
	if (err)
		return ERR_PTR(err);

	return hash;
}
323 | 323 | ||
/*
 * Generate the ICV for an (already encrypted) request: build a chained
 * scatterlist covering [assoc?] iv ciphertext, hash it, and append the
 * digest after the ciphertext.
 *
 * The fast single-digest path is only usable when the associated data
 * is one contiguous sg entry; otherwise the init/update/finup fallback
 * hashes the assoc data separately.  Returns 0 or a negative errno.
 */
static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
				 unsigned int flags)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct scatterlist *dst = req->dst;
	struct scatterlist *assoc = req->assoc;
	struct scatterlist *cipher = areq_ctx->cipher;
	struct scatterlist *asg = areq_ctx->asg;
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	unsigned int cryptlen = req->cryptlen;
	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
	struct page *dstp;
	u8 *vdst;
	u8 *hash;

	/* vdst is NULL for highmem pages; only used for the merge check. */
	dstp = sg_page(dst);
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;

	if (ivsize) {
		/* Prepend the IV to the hashed data; merge with dst when
		 * the IV happens to sit immediately before it in memory. */
		sg_init_table(cipher, 2);
		sg_set_buf(cipher, iv, ivsize);
		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
		dst = cipher;
		cryptlen += ivsize;
	}

	if (req->assoclen && sg_is_last(assoc)) {
		/* Single-entry assoc data: chain it in front and use the
		 * one-shot digest path. */
		authenc_ahash_fn = crypto_authenc_ahash;
		sg_init_table(asg, 2);
		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
		scatterwalk_crypto_chain(asg, dst, 0, 2);
		dst = asg;
		cryptlen += req->assoclen;
	}

	/* Stash the chain for the async completion handlers. */
	areq_ctx->cryptlen = cryptlen;
	areq_ctx->sg = dst;

	areq_ctx->complete = authenc_geniv_ahash_done;
	areq_ctx->update_complete = authenc_geniv_ahash_update_done;

	hash = authenc_ahash_fn(req, flags);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	/* Append the ICV after the hashed bytes. */
	scatterwalk_map_and_copy(hash, dst, cryptlen,
				 crypto_aead_authsize(authenc), 1);
	return 0;
}
374 | 374 | ||
/*
 * Async completion for the inner encryption: once the ciphertext is
 * ready, generate and append the ICV.  The IV copy was stashed just
 * before the child cipher request in the context tail (see
 * crypto_authenc_encrypt()).
 */
static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
					int err)
{
	struct aead_request *areq = req->data;

	if (!err) {
		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
		struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
		struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
							    + ctx->reqoff);
		/* Recover the IV saved immediately before abreq. */
		u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);

		err = crypto_authenc_genicv(areq, iv, 0);
	}

	authenc_request_complete(areq, err);
}
393 | 393 | ||
/*
 * Encrypt-then-MAC entry point: run the inner cipher over the payload,
 * then generate the ICV via crypto_authenc_genicv().
 *
 * A copy of the IV is kept just before the child cipher request inside
 * the context tail, because req->iv may be modified by the cipher and
 * the original is still needed for hashing (the completion handler
 * recovers it from the same location).
 */
static int crypto_authenc_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_ablkcipher *enc = ctx->enc;
	struct scatterlist *dst = req->dst;
	unsigned int cryptlen = req->cryptlen;
	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
						    + ctx->reqoff);
	/* IV copy lives immediately before the child request. */
	u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
	int err;

	ablkcipher_request_set_tfm(abreq, enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					crypto_authenc_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);

	memcpy(iv, req->iv, crypto_aead_ivsize(authenc));

	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	/* Synchronous completion: generate the ICV inline. */
	return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
}
420 | 420 | ||
421 | static void crypto_authenc_givencrypt_done(struct crypto_async_request *req, | 421 | static void crypto_authenc_givencrypt_done(struct crypto_async_request *req, |
422 | int err) | 422 | int err) |
423 | { | 423 | { |
424 | struct aead_request *areq = req->data; | 424 | struct aead_request *areq = req->data; |
425 | 425 | ||
426 | if (!err) { | 426 | if (!err) { |
427 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | 427 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); |
428 | 428 | ||
429 | err = crypto_authenc_genicv(areq, greq->giv, 0); | 429 | err = crypto_authenc_genicv(areq, greq->giv, 0); |
430 | } | 430 | } |
431 | 431 | ||
432 | authenc_request_complete(areq, err); | 432 | authenc_request_complete(areq, err); |
433 | } | 433 | } |
434 | 434 | ||
435 | static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) | 435 | static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) |
436 | { | 436 | { |
437 | struct crypto_aead *authenc = aead_givcrypt_reqtfm(req); | 437 | struct crypto_aead *authenc = aead_givcrypt_reqtfm(req); |
438 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 438 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
439 | struct aead_request *areq = &req->areq; | 439 | struct aead_request *areq = &req->areq; |
440 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | 440 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); |
441 | u8 *iv = req->giv; | 441 | u8 *iv = req->giv; |
442 | int err; | 442 | int err; |
443 | 443 | ||
444 | skcipher_givcrypt_set_tfm(greq, ctx->enc); | 444 | skcipher_givcrypt_set_tfm(greq, ctx->enc); |
445 | skcipher_givcrypt_set_callback(greq, aead_request_flags(areq), | 445 | skcipher_givcrypt_set_callback(greq, aead_request_flags(areq), |
446 | crypto_authenc_givencrypt_done, areq); | 446 | crypto_authenc_givencrypt_done, areq); |
447 | skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen, | 447 | skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen, |
448 | areq->iv); | 448 | areq->iv); |
449 | skcipher_givcrypt_set_giv(greq, iv, req->seq); | 449 | skcipher_givcrypt_set_giv(greq, iv, req->seq); |
450 | 450 | ||
451 | err = crypto_skcipher_givencrypt(greq); | 451 | err = crypto_skcipher_givencrypt(greq); |
452 | if (err) | 452 | if (err) |
453 | return err; | 453 | return err; |
454 | 454 | ||
455 | return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP); | 455 | return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP); |
456 | } | 456 | } |
457 | 457 | ||
458 | static int crypto_authenc_verify(struct aead_request *req, | 458 | static int crypto_authenc_verify(struct aead_request *req, |
459 | authenc_ahash_t authenc_ahash_fn) | 459 | authenc_ahash_t authenc_ahash_fn) |
460 | { | 460 | { |
461 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 461 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
462 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | 462 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
463 | u8 *ohash; | 463 | u8 *ohash; |
464 | u8 *ihash; | 464 | u8 *ihash; |
465 | unsigned int authsize; | 465 | unsigned int authsize; |
466 | 466 | ||
467 | areq_ctx->complete = authenc_verify_ahash_done; | 467 | areq_ctx->complete = authenc_verify_ahash_done; |
468 | areq_ctx->update_complete = authenc_verify_ahash_update_done; | 468 | areq_ctx->update_complete = authenc_verify_ahash_update_done; |
469 | 469 | ||
470 | ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); | 470 | ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); |
471 | if (IS_ERR(ohash)) | 471 | if (IS_ERR(ohash)) |
472 | return PTR_ERR(ohash); | 472 | return PTR_ERR(ohash); |
473 | 473 | ||
474 | authsize = crypto_aead_authsize(authenc); | 474 | authsize = crypto_aead_authsize(authenc); |
475 | ihash = ohash + authsize; | 475 | ihash = ohash + authsize; |
476 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 476 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
477 | authsize, 0); | 477 | authsize, 0); |
478 | return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; | 478 | return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; |
479 | } | 479 | } |
480 | 480 | ||
481 | static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | 481 | static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, |
482 | unsigned int cryptlen) | 482 | unsigned int cryptlen) |
483 | { | 483 | { |
484 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 484 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
485 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | 485 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
486 | struct scatterlist *src = req->src; | 486 | struct scatterlist *src = req->src; |
487 | struct scatterlist *assoc = req->assoc; | 487 | struct scatterlist *assoc = req->assoc; |
488 | struct scatterlist *cipher = areq_ctx->cipher; | 488 | struct scatterlist *cipher = areq_ctx->cipher; |
489 | struct scatterlist *asg = areq_ctx->asg; | 489 | struct scatterlist *asg = areq_ctx->asg; |
490 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 490 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
491 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | 491 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; |
492 | struct page *srcp; | 492 | struct page *srcp; |
493 | u8 *vsrc; | 493 | u8 *vsrc; |
494 | 494 | ||
495 | srcp = sg_page(src); | 495 | srcp = sg_page(src); |
496 | vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset; | 496 | vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset; |
497 | 497 | ||
498 | if (ivsize) { | 498 | if (ivsize) { |
499 | sg_init_table(cipher, 2); | 499 | sg_init_table(cipher, 2); |
500 | sg_set_buf(cipher, iv, ivsize); | 500 | sg_set_buf(cipher, iv, ivsize); |
501 | scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2); | 501 | scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2); |
502 | src = cipher; | 502 | src = cipher; |
503 | cryptlen += ivsize; | 503 | cryptlen += ivsize; |
504 | } | 504 | } |
505 | 505 | ||
506 | if (req->assoclen && sg_is_last(assoc)) { | 506 | if (req->assoclen && sg_is_last(assoc)) { |
507 | authenc_ahash_fn = crypto_authenc_ahash; | 507 | authenc_ahash_fn = crypto_authenc_ahash; |
508 | sg_init_table(asg, 2); | 508 | sg_init_table(asg, 2); |
509 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | 509 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); |
510 | scatterwalk_crypto_chain(asg, src, 0, 2); | 510 | scatterwalk_crypto_chain(asg, src, 0, 2); |
511 | src = asg; | 511 | src = asg; |
512 | cryptlen += req->assoclen; | 512 | cryptlen += req->assoclen; |
513 | } | 513 | } |
514 | 514 | ||
515 | areq_ctx->cryptlen = cryptlen; | 515 | areq_ctx->cryptlen = cryptlen; |
516 | areq_ctx->sg = src; | 516 | areq_ctx->sg = src; |
517 | 517 | ||
518 | return crypto_authenc_verify(req, authenc_ahash_fn); | 518 | return crypto_authenc_verify(req, authenc_ahash_fn); |
519 | } | 519 | } |
520 | 520 | ||
521 | static int crypto_authenc_decrypt(struct aead_request *req) | 521 | static int crypto_authenc_decrypt(struct aead_request *req) |
522 | { | 522 | { |
523 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 523 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
524 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 524 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
525 | struct ablkcipher_request *abreq = aead_request_ctx(req); | 525 | struct ablkcipher_request *abreq = aead_request_ctx(req); |
526 | unsigned int cryptlen = req->cryptlen; | 526 | unsigned int cryptlen = req->cryptlen; |
527 | unsigned int authsize = crypto_aead_authsize(authenc); | 527 | unsigned int authsize = crypto_aead_authsize(authenc); |
528 | u8 *iv = req->iv; | 528 | u8 *iv = req->iv; |
529 | int err; | 529 | int err; |
530 | 530 | ||
531 | if (cryptlen < authsize) | 531 | if (cryptlen < authsize) |
532 | return -EINVAL; | 532 | return -EINVAL; |
533 | cryptlen -= authsize; | 533 | cryptlen -= authsize; |
534 | 534 | ||
535 | err = crypto_authenc_iverify(req, iv, cryptlen); | 535 | err = crypto_authenc_iverify(req, iv, cryptlen); |
536 | if (err) | 536 | if (err) |
537 | return err; | 537 | return err; |
538 | 538 | ||
539 | ablkcipher_request_set_tfm(abreq, ctx->enc); | 539 | ablkcipher_request_set_tfm(abreq, ctx->enc); |
540 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 540 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
541 | req->base.complete, req->base.data); | 541 | req->base.complete, req->base.data); |
542 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); | 542 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); |
543 | 543 | ||
544 | return crypto_ablkcipher_decrypt(abreq); | 544 | return crypto_ablkcipher_decrypt(abreq); |
545 | } | 545 | } |
546 | 546 | ||
547 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) | 547 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) |
548 | { | 548 | { |
549 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 549 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
550 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); | 550 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); |
551 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 551 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); |
552 | struct crypto_ahash *auth; | 552 | struct crypto_ahash *auth; |
553 | struct crypto_ablkcipher *enc; | 553 | struct crypto_ablkcipher *enc; |
554 | int err; | 554 | int err; |
555 | 555 | ||
556 | auth = crypto_spawn_ahash(&ictx->auth); | 556 | auth = crypto_spawn_ahash(&ictx->auth); |
557 | if (IS_ERR(auth)) | 557 | if (IS_ERR(auth)) |
558 | return PTR_ERR(auth); | 558 | return PTR_ERR(auth); |
559 | 559 | ||
560 | enc = crypto_spawn_skcipher(&ictx->enc); | 560 | enc = crypto_spawn_skcipher(&ictx->enc); |
561 | err = PTR_ERR(enc); | 561 | err = PTR_ERR(enc); |
562 | if (IS_ERR(enc)) | 562 | if (IS_ERR(enc)) |
563 | goto err_free_ahash; | 563 | goto err_free_ahash; |
564 | 564 | ||
565 | ctx->auth = auth; | 565 | ctx->auth = auth; |
566 | ctx->enc = enc; | 566 | ctx->enc = enc; |
567 | 567 | ||
568 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + | 568 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + |
569 | crypto_ahash_alignmask(auth), | 569 | crypto_ahash_alignmask(auth), |
570 | crypto_ahash_alignmask(auth) + 1) + | 570 | crypto_ahash_alignmask(auth) + 1) + |
571 | crypto_ablkcipher_ivsize(enc); | 571 | crypto_ablkcipher_ivsize(enc); |
572 | 572 | ||
573 | tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) + | 573 | tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) + |
574 | ctx->reqoff + | 574 | ctx->reqoff + |
575 | max_t(unsigned int, | 575 | max_t(unsigned int, |
576 | crypto_ahash_reqsize(auth) + | 576 | crypto_ahash_reqsize(auth) + |
577 | sizeof(struct ahash_request), | 577 | sizeof(struct ahash_request), |
578 | sizeof(struct skcipher_givcrypt_request) + | 578 | sizeof(struct skcipher_givcrypt_request) + |
579 | crypto_ablkcipher_reqsize(enc)); | 579 | crypto_ablkcipher_reqsize(enc)); |
580 | 580 | ||
581 | return 0; | 581 | return 0; |
582 | 582 | ||
583 | err_free_ahash: | 583 | err_free_ahash: |
584 | crypto_free_ahash(auth); | 584 | crypto_free_ahash(auth); |
585 | return err; | 585 | return err; |
586 | } | 586 | } |
587 | 587 | ||
588 | static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) | 588 | static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) |
589 | { | 589 | { |
590 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 590 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); |
591 | 591 | ||
592 | crypto_free_ahash(ctx->auth); | 592 | crypto_free_ahash(ctx->auth); |
593 | crypto_free_ablkcipher(ctx->enc); | 593 | crypto_free_ablkcipher(ctx->enc); |
594 | } | 594 | } |
595 | 595 | ||
596 | static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | 596 | static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) |
597 | { | 597 | { |
598 | struct crypto_attr_type *algt; | 598 | struct crypto_attr_type *algt; |
599 | struct crypto_instance *inst; | 599 | struct crypto_instance *inst; |
600 | struct hash_alg_common *auth; | 600 | struct hash_alg_common *auth; |
601 | struct crypto_alg *auth_base; | 601 | struct crypto_alg *auth_base; |
602 | struct crypto_alg *enc; | 602 | struct crypto_alg *enc; |
603 | struct authenc_instance_ctx *ctx; | 603 | struct authenc_instance_ctx *ctx; |
604 | const char *enc_name; | 604 | const char *enc_name; |
605 | int err; | 605 | int err; |
606 | 606 | ||
607 | algt = crypto_get_attr_type(tb); | 607 | algt = crypto_get_attr_type(tb); |
608 | if (IS_ERR(algt)) | 608 | if (IS_ERR(algt)) |
609 | return ERR_CAST(algt); | 609 | return ERR_CAST(algt); |
610 | 610 | ||
611 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 611 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
612 | return ERR_PTR(-EINVAL); | 612 | return ERR_PTR(-EINVAL); |
613 | 613 | ||
614 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | 614 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, |
615 | CRYPTO_ALG_TYPE_AHASH_MASK); | 615 | CRYPTO_ALG_TYPE_AHASH_MASK); |
616 | if (IS_ERR(auth)) | 616 | if (IS_ERR(auth)) |
617 | return ERR_CAST(auth); | 617 | return ERR_CAST(auth); |
618 | 618 | ||
619 | auth_base = &auth->base; | 619 | auth_base = &auth->base; |
620 | 620 | ||
621 | enc_name = crypto_attr_alg_name(tb[2]); | 621 | enc_name = crypto_attr_alg_name(tb[2]); |
622 | err = PTR_ERR(enc_name); | 622 | err = PTR_ERR(enc_name); |
623 | if (IS_ERR(enc_name)) | 623 | if (IS_ERR(enc_name)) |
624 | goto out_put_auth; | 624 | goto out_put_auth; |
625 | 625 | ||
626 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 626 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
627 | err = -ENOMEM; | 627 | err = -ENOMEM; |
628 | if (!inst) | 628 | if (!inst) |
629 | goto out_put_auth; | 629 | goto out_put_auth; |
630 | 630 | ||
631 | ctx = crypto_instance_ctx(inst); | 631 | ctx = crypto_instance_ctx(inst); |
632 | 632 | ||
633 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); | 633 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); |
634 | if (err) | 634 | if (err) |
635 | goto err_free_inst; | 635 | goto err_free_inst; |
636 | 636 | ||
637 | crypto_set_skcipher_spawn(&ctx->enc, inst); | 637 | crypto_set_skcipher_spawn(&ctx->enc, inst); |
638 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, | 638 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, |
639 | crypto_requires_sync(algt->type, | 639 | crypto_requires_sync(algt->type, |
640 | algt->mask)); | 640 | algt->mask)); |
641 | if (err) | 641 | if (err) |
642 | goto err_drop_auth; | 642 | goto err_drop_auth; |
643 | 643 | ||
644 | enc = crypto_skcipher_spawn_alg(&ctx->enc); | 644 | enc = crypto_skcipher_spawn_alg(&ctx->enc); |
645 | 645 | ||
646 | err = -ENAMETOOLONG; | 646 | err = -ENAMETOOLONG; |
647 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 647 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, |
648 | "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= | 648 | "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= |
649 | CRYPTO_MAX_ALG_NAME) | 649 | CRYPTO_MAX_ALG_NAME) |
650 | goto err_drop_enc; | 650 | goto err_drop_enc; |
651 | 651 | ||
652 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 652 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
653 | "authenc(%s,%s)", auth_base->cra_driver_name, | 653 | "authenc(%s,%s)", auth_base->cra_driver_name, |
654 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 654 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
655 | goto err_drop_enc; | 655 | goto err_drop_enc; |
656 | 656 | ||
657 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 657 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; |
658 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; | 658 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; |
659 | inst->alg.cra_priority = enc->cra_priority * | 659 | inst->alg.cra_priority = enc->cra_priority * |
660 | 10 + auth_base->cra_priority; | 660 | 10 + auth_base->cra_priority; |
661 | inst->alg.cra_blocksize = enc->cra_blocksize; | 661 | inst->alg.cra_blocksize = enc->cra_blocksize; |
662 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; | 662 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; |
663 | inst->alg.cra_type = &crypto_aead_type; | 663 | inst->alg.cra_type = &crypto_aead_type; |
664 | 664 | ||
665 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; | 665 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; |
666 | inst->alg.cra_aead.maxauthsize = auth->digestsize; | 666 | inst->alg.cra_aead.maxauthsize = auth->digestsize; |
667 | 667 | ||
668 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); | 668 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); |
669 | 669 | ||
670 | inst->alg.cra_init = crypto_authenc_init_tfm; | 670 | inst->alg.cra_init = crypto_authenc_init_tfm; |
671 | inst->alg.cra_exit = crypto_authenc_exit_tfm; | 671 | inst->alg.cra_exit = crypto_authenc_exit_tfm; |
672 | 672 | ||
673 | inst->alg.cra_aead.setkey = crypto_authenc_setkey; | 673 | inst->alg.cra_aead.setkey = crypto_authenc_setkey; |
674 | inst->alg.cra_aead.encrypt = crypto_authenc_encrypt; | 674 | inst->alg.cra_aead.encrypt = crypto_authenc_encrypt; |
675 | inst->alg.cra_aead.decrypt = crypto_authenc_decrypt; | 675 | inst->alg.cra_aead.decrypt = crypto_authenc_decrypt; |
676 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; | 676 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; |
677 | 677 | ||
678 | out: | 678 | out: |
679 | crypto_mod_put(auth_base); | 679 | crypto_mod_put(auth_base); |
680 | return inst; | 680 | return inst; |
681 | 681 | ||
682 | err_drop_enc: | 682 | err_drop_enc: |
683 | crypto_drop_skcipher(&ctx->enc); | 683 | crypto_drop_skcipher(&ctx->enc); |
684 | err_drop_auth: | 684 | err_drop_auth: |
685 | crypto_drop_ahash(&ctx->auth); | 685 | crypto_drop_ahash(&ctx->auth); |
686 | err_free_inst: | 686 | err_free_inst: |
687 | kfree(inst); | 687 | kfree(inst); |
688 | out_put_auth: | 688 | out_put_auth: |
689 | inst = ERR_PTR(err); | 689 | inst = ERR_PTR(err); |
690 | goto out; | 690 | goto out; |
691 | } | 691 | } |
692 | 692 | ||
693 | static void crypto_authenc_free(struct crypto_instance *inst) | 693 | static void crypto_authenc_free(struct crypto_instance *inst) |
694 | { | 694 | { |
695 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); | 695 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); |
696 | 696 | ||
697 | crypto_drop_skcipher(&ctx->enc); | 697 | crypto_drop_skcipher(&ctx->enc); |
698 | crypto_drop_ahash(&ctx->auth); | 698 | crypto_drop_ahash(&ctx->auth); |
699 | kfree(inst); | 699 | kfree(inst); |
700 | } | 700 | } |
701 | 701 | ||
702 | static struct crypto_template crypto_authenc_tmpl = { | 702 | static struct crypto_template crypto_authenc_tmpl = { |
703 | .name = "authenc", | 703 | .name = "authenc", |
704 | .alloc = crypto_authenc_alloc, | 704 | .alloc = crypto_authenc_alloc, |
705 | .free = crypto_authenc_free, | 705 | .free = crypto_authenc_free, |
706 | .module = THIS_MODULE, | 706 | .module = THIS_MODULE, |
707 | }; | 707 | }; |
708 | 708 | ||
709 | static int __init crypto_authenc_module_init(void) | 709 | static int __init crypto_authenc_module_init(void) |
710 | { | 710 | { |
711 | return crypto_register_template(&crypto_authenc_tmpl); | 711 | return crypto_register_template(&crypto_authenc_tmpl); |
712 | } | 712 | } |
713 | 713 | ||
714 | static void __exit crypto_authenc_module_exit(void) | 714 | static void __exit crypto_authenc_module_exit(void) |
715 | { | 715 | { |
716 | crypto_unregister_template(&crypto_authenc_tmpl); | 716 | crypto_unregister_template(&crypto_authenc_tmpl); |
717 | } | 717 | } |
718 | 718 | ||
719 | module_init(crypto_authenc_module_init); | 719 | module_init(crypto_authenc_module_init); |
720 | module_exit(crypto_authenc_module_exit); | 720 | module_exit(crypto_authenc_module_exit); |
721 | 721 | ||
722 | MODULE_LICENSE("GPL"); | 722 | MODULE_LICENSE("GPL"); |
723 | MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec"); | 723 | MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec"); |
724 | MODULE_ALIAS_CRYPTO("authenc"); | ||
724 | 725 |
crypto/authencesn.c
1 | /* | 1 | /* |
2 | * authencesn.c - AEAD wrapper for IPsec with extended sequence numbers, | 2 | * authencesn.c - AEAD wrapper for IPsec with extended sequence numbers, |
3 | * derived from authenc.c | 3 | * derived from authenc.c |
4 | * | 4 | * |
5 | * Copyright (C) 2010 secunet Security Networks AG | 5 | * Copyright (C) 2010 secunet Security Networks AG |
6 | * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com> | 6 | * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the Free | 9 | * under the terms of the GNU General Public License as published by the Free |
10 | * Software Foundation; either version 2 of the License, or (at your option) | 10 | * Software Foundation; either version 2 of the License, or (at your option) |
11 | * any later version. | 11 | * any later version. |
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/aead.h> | 15 | #include <crypto/aead.h> |
16 | #include <crypto/internal/hash.h> | 16 | #include <crypto/internal/hash.h> |
17 | #include <crypto/internal/skcipher.h> | 17 | #include <crypto/internal/skcipher.h> |
18 | #include <crypto/authenc.h> | 18 | #include <crypto/authenc.h> |
19 | #include <crypto/scatterwalk.h> | 19 | #include <crypto/scatterwalk.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/rtnetlink.h> | 24 | #include <linux/rtnetlink.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | 27 | ||
28 | struct authenc_esn_instance_ctx { | 28 | struct authenc_esn_instance_ctx { |
29 | struct crypto_ahash_spawn auth; | 29 | struct crypto_ahash_spawn auth; |
30 | struct crypto_skcipher_spawn enc; | 30 | struct crypto_skcipher_spawn enc; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct crypto_authenc_esn_ctx { | 33 | struct crypto_authenc_esn_ctx { |
34 | unsigned int reqoff; | 34 | unsigned int reqoff; |
35 | struct crypto_ahash *auth; | 35 | struct crypto_ahash *auth; |
36 | struct crypto_ablkcipher *enc; | 36 | struct crypto_ablkcipher *enc; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | struct authenc_esn_request_ctx { | 39 | struct authenc_esn_request_ctx { |
40 | unsigned int cryptlen; | 40 | unsigned int cryptlen; |
41 | unsigned int headlen; | 41 | unsigned int headlen; |
42 | unsigned int trailen; | 42 | unsigned int trailen; |
43 | struct scatterlist *sg; | 43 | struct scatterlist *sg; |
44 | struct scatterlist hsg[2]; | 44 | struct scatterlist hsg[2]; |
45 | struct scatterlist tsg[1]; | 45 | struct scatterlist tsg[1]; |
46 | struct scatterlist cipher[2]; | 46 | struct scatterlist cipher[2]; |
47 | crypto_completion_t complete; | 47 | crypto_completion_t complete; |
48 | crypto_completion_t update_complete; | 48 | crypto_completion_t update_complete; |
49 | crypto_completion_t update_complete2; | 49 | crypto_completion_t update_complete2; |
50 | char tail[]; | 50 | char tail[]; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static void authenc_esn_request_complete(struct aead_request *req, int err) | 53 | static void authenc_esn_request_complete(struct aead_request *req, int err) |
54 | { | 54 | { |
55 | if (err != -EINPROGRESS) | 55 | if (err != -EINPROGRESS) |
56 | aead_request_complete(req, err); | 56 | aead_request_complete(req, err); |
57 | } | 57 | } |
58 | 58 | ||
59 | static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, | 59 | static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, |
60 | unsigned int keylen) | 60 | unsigned int keylen) |
61 | { | 61 | { |
62 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 62 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
63 | struct crypto_ahash *auth = ctx->auth; | 63 | struct crypto_ahash *auth = ctx->auth; |
64 | struct crypto_ablkcipher *enc = ctx->enc; | 64 | struct crypto_ablkcipher *enc = ctx->enc; |
65 | struct crypto_authenc_keys keys; | 65 | struct crypto_authenc_keys keys; |
66 | int err = -EINVAL; | 66 | int err = -EINVAL; |
67 | 67 | ||
68 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | 68 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
69 | goto badkey; | 69 | goto badkey; |
70 | 70 | ||
71 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); | 71 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); |
72 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & | 72 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & |
73 | CRYPTO_TFM_REQ_MASK); | 73 | CRYPTO_TFM_REQ_MASK); |
74 | err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); | 74 | err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); |
75 | crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & | 75 | crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & |
76 | CRYPTO_TFM_RES_MASK); | 76 | CRYPTO_TFM_RES_MASK); |
77 | 77 | ||
78 | if (err) | 78 | if (err) |
79 | goto out; | 79 | goto out; |
80 | 80 | ||
81 | crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); | 81 | crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); |
82 | crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & | 82 | crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & |
83 | CRYPTO_TFM_REQ_MASK); | 83 | CRYPTO_TFM_REQ_MASK); |
84 | err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); | 84 | err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); |
85 | crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & | 85 | crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & |
86 | CRYPTO_TFM_RES_MASK); | 86 | CRYPTO_TFM_RES_MASK); |
87 | 87 | ||
88 | out: | 88 | out: |
89 | return err; | 89 | return err; |
90 | 90 | ||
91 | badkey: | 91 | badkey: |
92 | crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN); | 92 | crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN); |
93 | goto out; | 93 | goto out; |
94 | } | 94 | } |
95 | 95 | ||
96 | static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq, | 96 | static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq, |
97 | int err) | 97 | int err) |
98 | { | 98 | { |
99 | struct aead_request *req = areq->data; | 99 | struct aead_request *req = areq->data; |
100 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 100 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
101 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 101 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
102 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 102 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); |
103 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 103 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
104 | 104 | ||
105 | if (err) | 105 | if (err) |
106 | goto out; | 106 | goto out; |
107 | 107 | ||
108 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | 108 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, |
109 | areq_ctx->cryptlen); | 109 | areq_ctx->cryptlen); |
110 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | 110 | ahash_request_set_callback(ahreq, aead_request_flags(req) & |
111 | CRYPTO_TFM_REQ_MAY_SLEEP, | 111 | CRYPTO_TFM_REQ_MAY_SLEEP, |
112 | areq_ctx->update_complete2, req); | 112 | areq_ctx->update_complete2, req); |
113 | 113 | ||
114 | err = crypto_ahash_update(ahreq); | 114 | err = crypto_ahash_update(ahreq); |
115 | if (err) | 115 | if (err) |
116 | goto out; | 116 | goto out; |
117 | 117 | ||
118 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | 118 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, |
119 | areq_ctx->trailen); | 119 | areq_ctx->trailen); |
120 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | 120 | ahash_request_set_callback(ahreq, aead_request_flags(req) & |
121 | CRYPTO_TFM_REQ_MAY_SLEEP, | 121 | CRYPTO_TFM_REQ_MAY_SLEEP, |
122 | areq_ctx->complete, req); | 122 | areq_ctx->complete, req); |
123 | 123 | ||
124 | err = crypto_ahash_finup(ahreq); | 124 | err = crypto_ahash_finup(ahreq); |
125 | if (err) | 125 | if (err) |
126 | goto out; | 126 | goto out; |
127 | 127 | ||
128 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | 128 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, |
129 | areq_ctx->cryptlen, | 129 | areq_ctx->cryptlen, |
130 | crypto_aead_authsize(authenc_esn), 1); | 130 | crypto_aead_authsize(authenc_esn), 1); |
131 | 131 | ||
132 | out: | 132 | out: |
133 | authenc_esn_request_complete(req, err); | 133 | authenc_esn_request_complete(req, err); |
134 | } | 134 | } |
135 | 135 | ||
136 | static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq, | 136 | static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq, |
137 | int err) | 137 | int err) |
138 | { | 138 | { |
139 | struct aead_request *req = areq->data; | 139 | struct aead_request *req = areq->data; |
140 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 140 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
141 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 141 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
142 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 142 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); |
143 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 143 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
144 | 144 | ||
145 | if (err) | 145 | if (err) |
146 | goto out; | 146 | goto out; |
147 | 147 | ||
148 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | 148 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, |
149 | areq_ctx->trailen); | 149 | areq_ctx->trailen); |
150 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | 150 | ahash_request_set_callback(ahreq, aead_request_flags(req) & |
151 | CRYPTO_TFM_REQ_MAY_SLEEP, | 151 | CRYPTO_TFM_REQ_MAY_SLEEP, |
152 | areq_ctx->complete, req); | 152 | areq_ctx->complete, req); |
153 | 153 | ||
154 | err = crypto_ahash_finup(ahreq); | 154 | err = crypto_ahash_finup(ahreq); |
155 | if (err) | 155 | if (err) |
156 | goto out; | 156 | goto out; |
157 | 157 | ||
158 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | 158 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, |
159 | areq_ctx->cryptlen, | 159 | areq_ctx->cryptlen, |
160 | crypto_aead_authsize(authenc_esn), 1); | 160 | crypto_aead_authsize(authenc_esn), 1); |
161 | 161 | ||
162 | out: | 162 | out: |
163 | authenc_esn_request_complete(req, err); | 163 | authenc_esn_request_complete(req, err); |
164 | } | 164 | } |
165 | 165 | ||
166 | 166 | ||
/*
 * Completion callback for the final (finup) step of ICV generation on the
 * encrypt path.  The digest has already been computed into ahreq->result;
 * all that remains is to append it to the ciphertext.
 */
static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
					 int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);

	if (err)
		goto out;

	/* Write the authentication tag just past the ciphertext in the
	 * destination scatterlist (direction flag 1 = copy out). */
	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
				 areq_ctx->cryptlen,
				 crypto_aead_authsize(authenc_esn), 1);

out:
	aead_request_complete(req, err);
}
186 | 186 | ||
187 | 187 | ||
/*
 * Async continuation for the decrypt/verify path, invoked after the first
 * hash update (over the associated-data head) completed asynchronously.
 * It drives the remaining steps inline: hash the ciphertext, finup over the
 * trailer, compare the computed ICV against the received one, and finally
 * kick off the actual decryption.
 *
 * NOTE(review): any step that itself goes async hands control to the next
 * registered callback (update_complete2 / complete) and this function
 * reaches "out" with its error code; authenc_esn_request_complete()
 * (defined earlier in this file) is presumably responsible for not
 * completing the request on -EINPROGRESS — confirm against that helper.
 */
static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
						 int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	/* Step 2: hash the ciphertext portion. */
	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
				areq_ctx->cryptlen);

	ahash_request_set_callback(ahreq,
				   aead_request_flags(req) &
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->update_complete2, req);

	err = crypto_ahash_update(ahreq);
	if (err)
		goto out;

	/* Step 3: hash the trailer (ESN low bits) and finalize. */
	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
				areq_ctx->trailen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	/* Copy the received ICV out of the source data (direction 0 = in)
	 * and compare in constant time against the computed digest. */
	authsize = crypto_aead_authsize(authenc_esn);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	/* ICV verified: decrypt the payload, completing the original AEAD
	 * request directly from the cipher's own completion. */
	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_esn_request_complete(req, err);
}
248 | 248 | ||
/*
 * Async continuation for the decrypt/verify path, invoked after the second
 * hash update (over the ciphertext) completed asynchronously.  Finishes the
 * digest over the trailer, verifies the ICV, then starts decryption.
 * Mirrors the tail of authenc_esn_verify_ahash_update_done().
 */
static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
						  int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	/* Final hash step: trailer scatterlist, then finalize. */
	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
				areq_ctx->trailen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	/* Fetch the transmitted ICV and compare in constant time. */
	authsize = crypto_aead_authsize(authenc_esn);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	/* Authentication passed: decrypt, chaining completion to the
	 * caller's original callback. */
	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_esn_request_complete(req, err);
}
297 | 297 | ||
298 | 298 | ||
/*
 * Async continuation for the decrypt/verify path, invoked after the final
 * finup completed asynchronously.  The digest is ready in ahreq->result;
 * verify it against the received ICV and start decryption.
 */
static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
					  int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	/* Copy the transmitted ICV next to the computed one and compare in
	 * constant time (crypto_memneq avoids a timing side channel). */
	authsize = crypto_aead_authsize(authenc_esn);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	/* ICV verified: decrypt the payload. */
	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_esn_request_complete(req, err);
}
337 | 337 | ||
/*
 * Compute the authentication digest for an ESN request in three passes:
 *   1. update over areq_ctx->hsg  (assoc head: SPI + ESN high bits)
 *   2. update over areq_ctx->sg   (IV + ciphertext)
 *   3. finup  over areq_ctx->tsg  (trailer: ESN low bits)
 * The scatterlists and the per-step completion callbacks
 * (update_complete / update_complete2 / complete) must have been set up by
 * the caller (genicv or iverify) before this is called.
 *
 * Returns a pointer to the digest inside areq_ctx->tail (aligned to the
 * hash's alignment mask), or ERR_PTR() on failure.  A step returning
 * -EINPROGRESS also propagates as ERR_PTR(); the registered callbacks then
 * continue the chain asynchronously.
 */
static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
				    unsigned int flags)
{
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct crypto_ahash *auth = ctx->auth;
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	u8 *hash = areq_ctx->tail;
	int err;

	/* Round the scratch pointer up to the hash's required alignment. */
	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
			   crypto_ahash_alignmask(auth) + 1);

	ahash_request_set_tfm(ahreq, auth);

	err = crypto_ahash_init(ahreq);
	if (err)
		return ERR_PTR(err);

	/* Pass 1: associated-data head. */
	ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->update_complete, req);

	err = crypto_ahash_update(ahreq);
	if (err)
		return ERR_PTR(err);

	/* Pass 2: ciphertext (plus chained IV, if any). */
	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->update_complete2, req);

	err = crypto_ahash_update(ahreq);
	if (err)
		return ERR_PTR(err);

	/* Pass 3: trailer, then finalize into 'hash'. */
	ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
				areq_ctx->trailen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		return ERR_PTR(err);

	return hash;
}
385 | 385 | ||
/*
 * Generate the ICV for an encrypt request and append it to the ciphertext.
 *
 * The associated data is required to be exactly three scatterlist entries
 * (head, ESN trailer, head continuation); it is re-split here so that the
 * digest is computed over head(assoc0 + assoc2), then data, then
 * trailer(assoc1).  If the cipher has an IV, it is chained in front of the
 * destination scatterlist so it is authenticated as well.
 *
 * Returns 0 on success or a negative error (-EINVAL if the assoc layout is
 * not the expected three entries).
 */
static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
				     unsigned int flags)
{
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct scatterlist *dst = req->dst;
	struct scatterlist *assoc = req->assoc;
	struct scatterlist *cipher = areq_ctx->cipher;
	struct scatterlist *hsg = areq_ctx->hsg;
	struct scatterlist *tsg = areq_ctx->tsg;
	struct scatterlist *assoc1;
	struct scatterlist *assoc2;
	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
	unsigned int cryptlen = req->cryptlen;
	struct page *dstp;
	u8 *vdst;
	u8 *hash;

	dstp = sg_page(dst);
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;

	if (ivsize) {
		/* Prepend the IV to the data being authenticated; merge the
		 * entries when the IV is physically contiguous with dst. */
		sg_init_table(cipher, 2);
		sg_set_buf(cipher, iv, ivsize);
		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
		dst = cipher;
		cryptlen += ivsize;
	}

	/* Exactly three assoc entries are expected. */
	if (sg_is_last(assoc))
		return -EINVAL;

	assoc1 = assoc + 1;
	if (sg_is_last(assoc1))
		return -EINVAL;

	assoc2 = assoc + 2;
	if (!sg_is_last(assoc2))
		return -EINVAL;

	/* Head = assoc0 + assoc2, hashed first. */
	sg_init_table(hsg, 2);
	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);

	/* Trailer = assoc1, hashed last. */
	sg_init_table(tsg, 1);
	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);

	areq_ctx->cryptlen = cryptlen;
	areq_ctx->headlen = assoc->length + assoc2->length;
	areq_ctx->trailen = assoc1->length;
	areq_ctx->sg = dst;

	areq_ctx->complete = authenc_esn_geniv_ahash_done;
	areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
	areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;

	hash = crypto_authenc_esn_ahash(req, flags);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	/* Append the tag after the (IV +) ciphertext. */
	scatterwalk_map_and_copy(hash, dst, cryptlen,
				 crypto_aead_authsize(authenc_esn), 1);
	return 0;
}
450 | 450 | ||
451 | 451 | ||
452 | static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, | 452 | static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, |
453 | int err) | 453 | int err) |
454 | { | 454 | { |
455 | struct aead_request *areq = req->data; | 455 | struct aead_request *areq = req->data; |
456 | 456 | ||
457 | if (!err) { | 457 | if (!err) { |
458 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq); | 458 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq); |
459 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 459 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
460 | struct ablkcipher_request *abreq = aead_request_ctx(areq); | 460 | struct ablkcipher_request *abreq = aead_request_ctx(areq); |
461 | u8 *iv = (u8 *)(abreq + 1) + | 461 | u8 *iv = (u8 *)(abreq + 1) + |
462 | crypto_ablkcipher_reqsize(ctx->enc); | 462 | crypto_ablkcipher_reqsize(ctx->enc); |
463 | 463 | ||
464 | err = crypto_authenc_esn_genicv(areq, iv, 0); | 464 | err = crypto_authenc_esn_genicv(areq, iv, 0); |
465 | } | 465 | } |
466 | 466 | ||
467 | authenc_esn_request_complete(areq, err); | 467 | authenc_esn_request_complete(areq, err); |
468 | } | 468 | } |
469 | 469 | ||
/*
 * AEAD .encrypt entry point: encrypt the plaintext with the inner cipher,
 * then compute and append the ICV.  A copy of the IV is stashed right
 * before the embedded ablkcipher request so that the (possibly async)
 * completion path can still reach it after req->iv has been consumed.
 */
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_ablkcipher *enc = ctx->enc;
	struct scatterlist *dst = req->dst;
	unsigned int cryptlen = req->cryptlen;
	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
						    + ctx->reqoff);
	/* IV copy sits immediately before the cipher request. */
	u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
	int err;

	ablkcipher_request_set_tfm(abreq, enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					crypto_authenc_esn_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);

	memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));

	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	/* Synchronous completion: generate the ICV inline. */
	return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
}
496 | 496 | ||
497 | static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req, | 497 | static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req, |
498 | int err) | 498 | int err) |
499 | { | 499 | { |
500 | struct aead_request *areq = req->data; | 500 | struct aead_request *areq = req->data; |
501 | 501 | ||
502 | if (!err) { | 502 | if (!err) { |
503 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | 503 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); |
504 | 504 | ||
505 | err = crypto_authenc_esn_genicv(areq, greq->giv, 0); | 505 | err = crypto_authenc_esn_genicv(areq, greq->giv, 0); |
506 | } | 506 | } |
507 | 507 | ||
508 | authenc_esn_request_complete(areq, err); | 508 | authenc_esn_request_complete(areq, err); |
509 | } | 509 | } |
510 | 510 | ||
/*
 * AEAD .givencrypt entry point: have the inner skcipher generate the IV
 * (seeded from req->seq) and encrypt, then compute and append the ICV over
 * the generated IV plus ciphertext.
 */
static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct aead_request *areq = &req->areq;
	struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
	u8 *iv = req->giv;
	int err;

	skcipher_givcrypt_set_tfm(greq, ctx->enc);
	skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
				       crypto_authenc_esn_givencrypt_done, areq);
	skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
				    areq->iv);
	skcipher_givcrypt_set_giv(greq, iv, req->seq);

	err = crypto_skcipher_givencrypt(greq);
	if (err)
		return err;

	/* Synchronous completion: generate the ICV inline. */
	return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
}
533 | 533 | ||
534 | static int crypto_authenc_esn_verify(struct aead_request *req) | 534 | static int crypto_authenc_esn_verify(struct aead_request *req) |
535 | { | 535 | { |
536 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 536 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
537 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 537 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); |
538 | u8 *ohash; | 538 | u8 *ohash; |
539 | u8 *ihash; | 539 | u8 *ihash; |
540 | unsigned int authsize; | 540 | unsigned int authsize; |
541 | 541 | ||
542 | areq_ctx->complete = authenc_esn_verify_ahash_done; | 542 | areq_ctx->complete = authenc_esn_verify_ahash_done; |
543 | areq_ctx->update_complete = authenc_esn_verify_ahash_update_done; | 543 | areq_ctx->update_complete = authenc_esn_verify_ahash_update_done; |
544 | 544 | ||
545 | ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP); | 545 | ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP); |
546 | if (IS_ERR(ohash)) | 546 | if (IS_ERR(ohash)) |
547 | return PTR_ERR(ohash); | 547 | return PTR_ERR(ohash); |
548 | 548 | ||
549 | authsize = crypto_aead_authsize(authenc_esn); | 549 | authsize = crypto_aead_authsize(authenc_esn); |
550 | ihash = ohash + authsize; | 550 | ihash = ohash + authsize; |
551 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 551 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
552 | authsize, 0); | 552 | authsize, 0); |
553 | return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; | 553 | return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; |
554 | } | 554 | } |
555 | 555 | ||
/*
 * Decrypt-side mirror of crypto_authenc_esn_genicv(): build the hashing
 * scatterlists over the *source* (received) data — head(assoc0 + assoc2),
 * IV + ciphertext, trailer(assoc1) — install the verify continuations, and
 * run the ICV check.  Requires exactly three assoc entries (-EINVAL
 * otherwise).
 */
static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
				      unsigned int cryptlen)
{
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct scatterlist *src = req->src;
	struct scatterlist *assoc = req->assoc;
	struct scatterlist *cipher = areq_ctx->cipher;
	struct scatterlist *hsg = areq_ctx->hsg;
	struct scatterlist *tsg = areq_ctx->tsg;
	struct scatterlist *assoc1;
	struct scatterlist *assoc2;
	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
	struct page *srcp;
	u8 *vsrc;

	srcp = sg_page(src);
	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;

	if (ivsize) {
		/* Authenticate the IV too; merge with src if contiguous. */
		sg_init_table(cipher, 2);
		sg_set_buf(cipher, iv, ivsize);
		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
		src = cipher;
		cryptlen += ivsize;
	}

	/* Exactly three assoc entries are expected. */
	if (sg_is_last(assoc))
		return -EINVAL;

	assoc1 = assoc + 1;
	if (sg_is_last(assoc1))
		return -EINVAL;

	assoc2 = assoc + 2;
	if (!sg_is_last(assoc2))
		return -EINVAL;

	/* Head = assoc0 + assoc2, hashed first. */
	sg_init_table(hsg, 2);
	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);

	/* Trailer = assoc1, hashed last. */
	sg_init_table(tsg, 1);
	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);

	areq_ctx->cryptlen = cryptlen;
	areq_ctx->headlen = assoc->length + assoc2->length;
	areq_ctx->trailen = assoc1->length;
	areq_ctx->sg = src;

	areq_ctx->complete = authenc_esn_verify_ahash_done;
	areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
	areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;

	return crypto_authenc_esn_verify(req);
}
612 | 612 | ||
/*
 * AEAD .decrypt entry point: verify the ICV first (rejecting forged or
 * truncated input before touching the cipher), then decrypt the payload
 * minus the authentication tag.  Returns -EINVAL if the input is shorter
 * than the tag, -EBADMSG on ICV mismatch, or the cipher's result.
 */
static int crypto_authenc_esn_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct ablkcipher_request *abreq = aead_request_ctx(req);
	unsigned int cryptlen = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(authenc_esn);
	u8 *iv = req->iv;
	int err;

	if (cryptlen < authsize)
		return -EINVAL;
	cryptlen -= authsize;

	err = crypto_authenc_esn_iverify(req, iv, cryptlen);
	if (err)
		return err;

	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);

	return crypto_ablkcipher_decrypt(abreq);
}
638 | 638 | ||
639 | static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) | 639 | static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) |
640 | { | 640 | { |
641 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 641 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
642 | struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst); | 642 | struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst); |
643 | struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); | 643 | struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); |
644 | struct crypto_ahash *auth; | 644 | struct crypto_ahash *auth; |
645 | struct crypto_ablkcipher *enc; | 645 | struct crypto_ablkcipher *enc; |
646 | int err; | 646 | int err; |
647 | 647 | ||
648 | auth = crypto_spawn_ahash(&ictx->auth); | 648 | auth = crypto_spawn_ahash(&ictx->auth); |
649 | if (IS_ERR(auth)) | 649 | if (IS_ERR(auth)) |
650 | return PTR_ERR(auth); | 650 | return PTR_ERR(auth); |
651 | 651 | ||
652 | enc = crypto_spawn_skcipher(&ictx->enc); | 652 | enc = crypto_spawn_skcipher(&ictx->enc); |
653 | err = PTR_ERR(enc); | 653 | err = PTR_ERR(enc); |
654 | if (IS_ERR(enc)) | 654 | if (IS_ERR(enc)) |
655 | goto err_free_ahash; | 655 | goto err_free_ahash; |
656 | 656 | ||
657 | ctx->auth = auth; | 657 | ctx->auth = auth; |
658 | ctx->enc = enc; | 658 | ctx->enc = enc; |
659 | 659 | ||
660 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + | 660 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + |
661 | crypto_ahash_alignmask(auth), | 661 | crypto_ahash_alignmask(auth), |
662 | crypto_ahash_alignmask(auth) + 1) + | 662 | crypto_ahash_alignmask(auth) + 1) + |
663 | crypto_ablkcipher_ivsize(enc); | 663 | crypto_ablkcipher_ivsize(enc); |
664 | 664 | ||
665 | tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) + | 665 | tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) + |
666 | ctx->reqoff + | 666 | ctx->reqoff + |
667 | max_t(unsigned int, | 667 | max_t(unsigned int, |
668 | crypto_ahash_reqsize(auth) + | 668 | crypto_ahash_reqsize(auth) + |
669 | sizeof(struct ahash_request), | 669 | sizeof(struct ahash_request), |
670 | sizeof(struct skcipher_givcrypt_request) + | 670 | sizeof(struct skcipher_givcrypt_request) + |
671 | crypto_ablkcipher_reqsize(enc)); | 671 | crypto_ablkcipher_reqsize(enc)); |
672 | 672 | ||
673 | return 0; | 673 | return 0; |
674 | 674 | ||
675 | err_free_ahash: | 675 | err_free_ahash: |
676 | crypto_free_ahash(auth); | 676 | crypto_free_ahash(auth); |
677 | return err; | 677 | return err; |
678 | } | 678 | } |
679 | 679 | ||
680 | static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm) | 680 | static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm) |
681 | { | 681 | { |
682 | struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); | 682 | struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); |
683 | 683 | ||
684 | crypto_free_ahash(ctx->auth); | 684 | crypto_free_ahash(ctx->auth); |
685 | crypto_free_ablkcipher(ctx->enc); | 685 | crypto_free_ablkcipher(ctx->enc); |
686 | } | 686 | } |
687 | 687 | ||
688 | static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) | 688 | static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) |
689 | { | 689 | { |
690 | struct crypto_attr_type *algt; | 690 | struct crypto_attr_type *algt; |
691 | struct crypto_instance *inst; | 691 | struct crypto_instance *inst; |
692 | struct hash_alg_common *auth; | 692 | struct hash_alg_common *auth; |
693 | struct crypto_alg *auth_base; | 693 | struct crypto_alg *auth_base; |
694 | struct crypto_alg *enc; | 694 | struct crypto_alg *enc; |
695 | struct authenc_esn_instance_ctx *ctx; | 695 | struct authenc_esn_instance_ctx *ctx; |
696 | const char *enc_name; | 696 | const char *enc_name; |
697 | int err; | 697 | int err; |
698 | 698 | ||
699 | algt = crypto_get_attr_type(tb); | 699 | algt = crypto_get_attr_type(tb); |
700 | if (IS_ERR(algt)) | 700 | if (IS_ERR(algt)) |
701 | return ERR_CAST(algt); | 701 | return ERR_CAST(algt); |
702 | 702 | ||
703 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 703 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
704 | return ERR_PTR(-EINVAL); | 704 | return ERR_PTR(-EINVAL); |
705 | 705 | ||
706 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | 706 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, |
707 | CRYPTO_ALG_TYPE_AHASH_MASK); | 707 | CRYPTO_ALG_TYPE_AHASH_MASK); |
708 | if (IS_ERR(auth)) | 708 | if (IS_ERR(auth)) |
709 | return ERR_CAST(auth); | 709 | return ERR_CAST(auth); |
710 | 710 | ||
711 | auth_base = &auth->base; | 711 | auth_base = &auth->base; |
712 | 712 | ||
713 | enc_name = crypto_attr_alg_name(tb[2]); | 713 | enc_name = crypto_attr_alg_name(tb[2]); |
714 | err = PTR_ERR(enc_name); | 714 | err = PTR_ERR(enc_name); |
715 | if (IS_ERR(enc_name)) | 715 | if (IS_ERR(enc_name)) |
716 | goto out_put_auth; | 716 | goto out_put_auth; |
717 | 717 | ||
718 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 718 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
719 | err = -ENOMEM; | 719 | err = -ENOMEM; |
720 | if (!inst) | 720 | if (!inst) |
721 | goto out_put_auth; | 721 | goto out_put_auth; |
722 | 722 | ||
723 | ctx = crypto_instance_ctx(inst); | 723 | ctx = crypto_instance_ctx(inst); |
724 | 724 | ||
725 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); | 725 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); |
726 | if (err) | 726 | if (err) |
727 | goto err_free_inst; | 727 | goto err_free_inst; |
728 | 728 | ||
729 | crypto_set_skcipher_spawn(&ctx->enc, inst); | 729 | crypto_set_skcipher_spawn(&ctx->enc, inst); |
730 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, | 730 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, |
731 | crypto_requires_sync(algt->type, | 731 | crypto_requires_sync(algt->type, |
732 | algt->mask)); | 732 | algt->mask)); |
733 | if (err) | 733 | if (err) |
734 | goto err_drop_auth; | 734 | goto err_drop_auth; |
735 | 735 | ||
736 | enc = crypto_skcipher_spawn_alg(&ctx->enc); | 736 | enc = crypto_skcipher_spawn_alg(&ctx->enc); |
737 | 737 | ||
738 | err = -ENAMETOOLONG; | 738 | err = -ENAMETOOLONG; |
739 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 739 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, |
740 | "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >= | 740 | "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >= |
741 | CRYPTO_MAX_ALG_NAME) | 741 | CRYPTO_MAX_ALG_NAME) |
742 | goto err_drop_enc; | 742 | goto err_drop_enc; |
743 | 743 | ||
744 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 744 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
745 | "authencesn(%s,%s)", auth_base->cra_driver_name, | 745 | "authencesn(%s,%s)", auth_base->cra_driver_name, |
746 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 746 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
747 | goto err_drop_enc; | 747 | goto err_drop_enc; |
748 | 748 | ||
749 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 749 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; |
750 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; | 750 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; |
751 | inst->alg.cra_priority = enc->cra_priority * | 751 | inst->alg.cra_priority = enc->cra_priority * |
752 | 10 + auth_base->cra_priority; | 752 | 10 + auth_base->cra_priority; |
753 | inst->alg.cra_blocksize = enc->cra_blocksize; | 753 | inst->alg.cra_blocksize = enc->cra_blocksize; |
754 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; | 754 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; |
755 | inst->alg.cra_type = &crypto_aead_type; | 755 | inst->alg.cra_type = &crypto_aead_type; |
756 | 756 | ||
757 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; | 757 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; |
758 | inst->alg.cra_aead.maxauthsize = auth->digestsize; | 758 | inst->alg.cra_aead.maxauthsize = auth->digestsize; |
759 | 759 | ||
760 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); | 760 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); |
761 | 761 | ||
762 | inst->alg.cra_init = crypto_authenc_esn_init_tfm; | 762 | inst->alg.cra_init = crypto_authenc_esn_init_tfm; |
763 | inst->alg.cra_exit = crypto_authenc_esn_exit_tfm; | 763 | inst->alg.cra_exit = crypto_authenc_esn_exit_tfm; |
764 | 764 | ||
765 | inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey; | 765 | inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey; |
766 | inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt; | 766 | inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt; |
767 | inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt; | 767 | inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt; |
768 | inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt; | 768 | inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt; |
769 | 769 | ||
770 | out: | 770 | out: |
771 | crypto_mod_put(auth_base); | 771 | crypto_mod_put(auth_base); |
772 | return inst; | 772 | return inst; |
773 | 773 | ||
774 | err_drop_enc: | 774 | err_drop_enc: |
775 | crypto_drop_skcipher(&ctx->enc); | 775 | crypto_drop_skcipher(&ctx->enc); |
776 | err_drop_auth: | 776 | err_drop_auth: |
777 | crypto_drop_ahash(&ctx->auth); | 777 | crypto_drop_ahash(&ctx->auth); |
778 | err_free_inst: | 778 | err_free_inst: |
779 | kfree(inst); | 779 | kfree(inst); |
780 | out_put_auth: | 780 | out_put_auth: |
781 | inst = ERR_PTR(err); | 781 | inst = ERR_PTR(err); |
782 | goto out; | 782 | goto out; |
783 | } | 783 | } |
784 | 784 | ||
785 | static void crypto_authenc_esn_free(struct crypto_instance *inst) | 785 | static void crypto_authenc_esn_free(struct crypto_instance *inst) |
786 | { | 786 | { |
787 | struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst); | 787 | struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst); |
788 | 788 | ||
789 | crypto_drop_skcipher(&ctx->enc); | 789 | crypto_drop_skcipher(&ctx->enc); |
790 | crypto_drop_ahash(&ctx->auth); | 790 | crypto_drop_ahash(&ctx->auth); |
791 | kfree(inst); | 791 | kfree(inst); |
792 | } | 792 | } |
793 | 793 | ||
794 | static struct crypto_template crypto_authenc_esn_tmpl = { | 794 | static struct crypto_template crypto_authenc_esn_tmpl = { |
795 | .name = "authencesn", | 795 | .name = "authencesn", |
796 | .alloc = crypto_authenc_esn_alloc, | 796 | .alloc = crypto_authenc_esn_alloc, |
797 | .free = crypto_authenc_esn_free, | 797 | .free = crypto_authenc_esn_free, |
798 | .module = THIS_MODULE, | 798 | .module = THIS_MODULE, |
799 | }; | 799 | }; |
800 | 800 | ||
801 | static int __init crypto_authenc_esn_module_init(void) | 801 | static int __init crypto_authenc_esn_module_init(void) |
802 | { | 802 | { |
803 | return crypto_register_template(&crypto_authenc_esn_tmpl); | 803 | return crypto_register_template(&crypto_authenc_esn_tmpl); |
804 | } | 804 | } |
805 | 805 | ||
806 | static void __exit crypto_authenc_esn_module_exit(void) | 806 | static void __exit crypto_authenc_esn_module_exit(void) |
807 | { | 807 | { |
808 | crypto_unregister_template(&crypto_authenc_esn_tmpl); | 808 | crypto_unregister_template(&crypto_authenc_esn_tmpl); |
809 | } | 809 | } |
810 | 810 | ||
811 | module_init(crypto_authenc_esn_module_init); | 811 | module_init(crypto_authenc_esn_module_init); |
812 | module_exit(crypto_authenc_esn_module_exit); | 812 | module_exit(crypto_authenc_esn_module_exit); |
813 | 813 | ||
814 | MODULE_LICENSE("GPL"); | 814 | MODULE_LICENSE("GPL"); |
815 | MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); | 815 | MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); |
816 | MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers"); | 816 | MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers"); |
817 | MODULE_ALIAS_CRYPTO("authencesn"); | ||
817 | 818 |
crypto/cbc.c
1 | /* | 1 | /* |
2 | * CBC: Cipher Block Chaining mode | 2 | * CBC: Cipher Block Chaining mode |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <crypto/algapi.h> | 13 | #include <crypto/algapi.h> |
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/log2.h> | 17 | #include <linux/log2.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/scatterlist.h> | 19 | #include <linux/scatterlist.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | 21 | ||
22 | struct crypto_cbc_ctx { | 22 | struct crypto_cbc_ctx { |
23 | struct crypto_cipher *child; | 23 | struct crypto_cipher *child; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, | 26 | static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, |
27 | unsigned int keylen) | 27 | unsigned int keylen) |
28 | { | 28 | { |
29 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent); | 29 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent); |
30 | struct crypto_cipher *child = ctx->child; | 30 | struct crypto_cipher *child = ctx->child; |
31 | int err; | 31 | int err; |
32 | 32 | ||
33 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 33 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
34 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 34 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & |
35 | CRYPTO_TFM_REQ_MASK); | 35 | CRYPTO_TFM_REQ_MASK); |
36 | err = crypto_cipher_setkey(child, key, keylen); | 36 | err = crypto_cipher_setkey(child, key, keylen); |
37 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 37 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & |
38 | CRYPTO_TFM_RES_MASK); | 38 | CRYPTO_TFM_RES_MASK); |
39 | return err; | 39 | return err; |
40 | } | 40 | } |
41 | 41 | ||
42 | static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, | 42 | static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, |
43 | struct blkcipher_walk *walk, | 43 | struct blkcipher_walk *walk, |
44 | struct crypto_cipher *tfm) | 44 | struct crypto_cipher *tfm) |
45 | { | 45 | { |
46 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 46 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
47 | crypto_cipher_alg(tfm)->cia_encrypt; | 47 | crypto_cipher_alg(tfm)->cia_encrypt; |
48 | int bsize = crypto_cipher_blocksize(tfm); | 48 | int bsize = crypto_cipher_blocksize(tfm); |
49 | unsigned int nbytes = walk->nbytes; | 49 | unsigned int nbytes = walk->nbytes; |
50 | u8 *src = walk->src.virt.addr; | 50 | u8 *src = walk->src.virt.addr; |
51 | u8 *dst = walk->dst.virt.addr; | 51 | u8 *dst = walk->dst.virt.addr; |
52 | u8 *iv = walk->iv; | 52 | u8 *iv = walk->iv; |
53 | 53 | ||
54 | do { | 54 | do { |
55 | crypto_xor(iv, src, bsize); | 55 | crypto_xor(iv, src, bsize); |
56 | fn(crypto_cipher_tfm(tfm), dst, iv); | 56 | fn(crypto_cipher_tfm(tfm), dst, iv); |
57 | memcpy(iv, dst, bsize); | 57 | memcpy(iv, dst, bsize); |
58 | 58 | ||
59 | src += bsize; | 59 | src += bsize; |
60 | dst += bsize; | 60 | dst += bsize; |
61 | } while ((nbytes -= bsize) >= bsize); | 61 | } while ((nbytes -= bsize) >= bsize); |
62 | 62 | ||
63 | return nbytes; | 63 | return nbytes; |
64 | } | 64 | } |
65 | 65 | ||
66 | static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, | 66 | static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, |
67 | struct blkcipher_walk *walk, | 67 | struct blkcipher_walk *walk, |
68 | struct crypto_cipher *tfm) | 68 | struct crypto_cipher *tfm) |
69 | { | 69 | { |
70 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 70 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
71 | crypto_cipher_alg(tfm)->cia_encrypt; | 71 | crypto_cipher_alg(tfm)->cia_encrypt; |
72 | int bsize = crypto_cipher_blocksize(tfm); | 72 | int bsize = crypto_cipher_blocksize(tfm); |
73 | unsigned int nbytes = walk->nbytes; | 73 | unsigned int nbytes = walk->nbytes; |
74 | u8 *src = walk->src.virt.addr; | 74 | u8 *src = walk->src.virt.addr; |
75 | u8 *iv = walk->iv; | 75 | u8 *iv = walk->iv; |
76 | 76 | ||
77 | do { | 77 | do { |
78 | crypto_xor(src, iv, bsize); | 78 | crypto_xor(src, iv, bsize); |
79 | fn(crypto_cipher_tfm(tfm), src, src); | 79 | fn(crypto_cipher_tfm(tfm), src, src); |
80 | iv = src; | 80 | iv = src; |
81 | 81 | ||
82 | src += bsize; | 82 | src += bsize; |
83 | } while ((nbytes -= bsize) >= bsize); | 83 | } while ((nbytes -= bsize) >= bsize); |
84 | 84 | ||
85 | memcpy(walk->iv, iv, bsize); | 85 | memcpy(walk->iv, iv, bsize); |
86 | 86 | ||
87 | return nbytes; | 87 | return nbytes; |
88 | } | 88 | } |
89 | 89 | ||
90 | static int crypto_cbc_encrypt(struct blkcipher_desc *desc, | 90 | static int crypto_cbc_encrypt(struct blkcipher_desc *desc, |
91 | struct scatterlist *dst, struct scatterlist *src, | 91 | struct scatterlist *dst, struct scatterlist *src, |
92 | unsigned int nbytes) | 92 | unsigned int nbytes) |
93 | { | 93 | { |
94 | struct blkcipher_walk walk; | 94 | struct blkcipher_walk walk; |
95 | struct crypto_blkcipher *tfm = desc->tfm; | 95 | struct crypto_blkcipher *tfm = desc->tfm; |
96 | struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | 96 | struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); |
97 | struct crypto_cipher *child = ctx->child; | 97 | struct crypto_cipher *child = ctx->child; |
98 | int err; | 98 | int err; |
99 | 99 | ||
100 | blkcipher_walk_init(&walk, dst, src, nbytes); | 100 | blkcipher_walk_init(&walk, dst, src, nbytes); |
101 | err = blkcipher_walk_virt(desc, &walk); | 101 | err = blkcipher_walk_virt(desc, &walk); |
102 | 102 | ||
103 | while ((nbytes = walk.nbytes)) { | 103 | while ((nbytes = walk.nbytes)) { |
104 | if (walk.src.virt.addr == walk.dst.virt.addr) | 104 | if (walk.src.virt.addr == walk.dst.virt.addr) |
105 | nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child); | 105 | nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child); |
106 | else | 106 | else |
107 | nbytes = crypto_cbc_encrypt_segment(desc, &walk, child); | 107 | nbytes = crypto_cbc_encrypt_segment(desc, &walk, child); |
108 | err = blkcipher_walk_done(desc, &walk, nbytes); | 108 | err = blkcipher_walk_done(desc, &walk, nbytes); |
109 | } | 109 | } |
110 | 110 | ||
111 | return err; | 111 | return err; |
112 | } | 112 | } |
113 | 113 | ||
114 | static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, | 114 | static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, |
115 | struct blkcipher_walk *walk, | 115 | struct blkcipher_walk *walk, |
116 | struct crypto_cipher *tfm) | 116 | struct crypto_cipher *tfm) |
117 | { | 117 | { |
118 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 118 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
119 | crypto_cipher_alg(tfm)->cia_decrypt; | 119 | crypto_cipher_alg(tfm)->cia_decrypt; |
120 | int bsize = crypto_cipher_blocksize(tfm); | 120 | int bsize = crypto_cipher_blocksize(tfm); |
121 | unsigned int nbytes = walk->nbytes; | 121 | unsigned int nbytes = walk->nbytes; |
122 | u8 *src = walk->src.virt.addr; | 122 | u8 *src = walk->src.virt.addr; |
123 | u8 *dst = walk->dst.virt.addr; | 123 | u8 *dst = walk->dst.virt.addr; |
124 | u8 *iv = walk->iv; | 124 | u8 *iv = walk->iv; |
125 | 125 | ||
126 | do { | 126 | do { |
127 | fn(crypto_cipher_tfm(tfm), dst, src); | 127 | fn(crypto_cipher_tfm(tfm), dst, src); |
128 | crypto_xor(dst, iv, bsize); | 128 | crypto_xor(dst, iv, bsize); |
129 | iv = src; | 129 | iv = src; |
130 | 130 | ||
131 | src += bsize; | 131 | src += bsize; |
132 | dst += bsize; | 132 | dst += bsize; |
133 | } while ((nbytes -= bsize) >= bsize); | 133 | } while ((nbytes -= bsize) >= bsize); |
134 | 134 | ||
135 | memcpy(walk->iv, iv, bsize); | 135 | memcpy(walk->iv, iv, bsize); |
136 | 136 | ||
137 | return nbytes; | 137 | return nbytes; |
138 | } | 138 | } |
139 | 139 | ||
140 | static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, | 140 | static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, |
141 | struct blkcipher_walk *walk, | 141 | struct blkcipher_walk *walk, |
142 | struct crypto_cipher *tfm) | 142 | struct crypto_cipher *tfm) |
143 | { | 143 | { |
144 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 144 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
145 | crypto_cipher_alg(tfm)->cia_decrypt; | 145 | crypto_cipher_alg(tfm)->cia_decrypt; |
146 | int bsize = crypto_cipher_blocksize(tfm); | 146 | int bsize = crypto_cipher_blocksize(tfm); |
147 | unsigned int nbytes = walk->nbytes; | 147 | unsigned int nbytes = walk->nbytes; |
148 | u8 *src = walk->src.virt.addr; | 148 | u8 *src = walk->src.virt.addr; |
149 | u8 last_iv[bsize]; | 149 | u8 last_iv[bsize]; |
150 | 150 | ||
151 | /* Start of the last block. */ | 151 | /* Start of the last block. */ |
152 | src += nbytes - (nbytes & (bsize - 1)) - bsize; | 152 | src += nbytes - (nbytes & (bsize - 1)) - bsize; |
153 | memcpy(last_iv, src, bsize); | 153 | memcpy(last_iv, src, bsize); |
154 | 154 | ||
155 | for (;;) { | 155 | for (;;) { |
156 | fn(crypto_cipher_tfm(tfm), src, src); | 156 | fn(crypto_cipher_tfm(tfm), src, src); |
157 | if ((nbytes -= bsize) < bsize) | 157 | if ((nbytes -= bsize) < bsize) |
158 | break; | 158 | break; |
159 | crypto_xor(src, src - bsize, bsize); | 159 | crypto_xor(src, src - bsize, bsize); |
160 | src -= bsize; | 160 | src -= bsize; |
161 | } | 161 | } |
162 | 162 | ||
163 | crypto_xor(src, walk->iv, bsize); | 163 | crypto_xor(src, walk->iv, bsize); |
164 | memcpy(walk->iv, last_iv, bsize); | 164 | memcpy(walk->iv, last_iv, bsize); |
165 | 165 | ||
166 | return nbytes; | 166 | return nbytes; |
167 | } | 167 | } |
168 | 168 | ||
169 | static int crypto_cbc_decrypt(struct blkcipher_desc *desc, | 169 | static int crypto_cbc_decrypt(struct blkcipher_desc *desc, |
170 | struct scatterlist *dst, struct scatterlist *src, | 170 | struct scatterlist *dst, struct scatterlist *src, |
171 | unsigned int nbytes) | 171 | unsigned int nbytes) |
172 | { | 172 | { |
173 | struct blkcipher_walk walk; | 173 | struct blkcipher_walk walk; |
174 | struct crypto_blkcipher *tfm = desc->tfm; | 174 | struct crypto_blkcipher *tfm = desc->tfm; |
175 | struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | 175 | struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); |
176 | struct crypto_cipher *child = ctx->child; | 176 | struct crypto_cipher *child = ctx->child; |
177 | int err; | 177 | int err; |
178 | 178 | ||
179 | blkcipher_walk_init(&walk, dst, src, nbytes); | 179 | blkcipher_walk_init(&walk, dst, src, nbytes); |
180 | err = blkcipher_walk_virt(desc, &walk); | 180 | err = blkcipher_walk_virt(desc, &walk); |
181 | 181 | ||
182 | while ((nbytes = walk.nbytes)) { | 182 | while ((nbytes = walk.nbytes)) { |
183 | if (walk.src.virt.addr == walk.dst.virt.addr) | 183 | if (walk.src.virt.addr == walk.dst.virt.addr) |
184 | nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child); | 184 | nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child); |
185 | else | 185 | else |
186 | nbytes = crypto_cbc_decrypt_segment(desc, &walk, child); | 186 | nbytes = crypto_cbc_decrypt_segment(desc, &walk, child); |
187 | err = blkcipher_walk_done(desc, &walk, nbytes); | 187 | err = blkcipher_walk_done(desc, &walk, nbytes); |
188 | } | 188 | } |
189 | 189 | ||
190 | return err; | 190 | return err; |
191 | } | 191 | } |
192 | 192 | ||
193 | static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) | 193 | static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) |
194 | { | 194 | { |
195 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 195 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
196 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 196 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
197 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 197 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); |
198 | struct crypto_cipher *cipher; | 198 | struct crypto_cipher *cipher; |
199 | 199 | ||
200 | cipher = crypto_spawn_cipher(spawn); | 200 | cipher = crypto_spawn_cipher(spawn); |
201 | if (IS_ERR(cipher)) | 201 | if (IS_ERR(cipher)) |
202 | return PTR_ERR(cipher); | 202 | return PTR_ERR(cipher); |
203 | 203 | ||
204 | ctx->child = cipher; | 204 | ctx->child = cipher; |
205 | return 0; | 205 | return 0; |
206 | } | 206 | } |
207 | 207 | ||
208 | static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) | 208 | static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) |
209 | { | 209 | { |
210 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 210 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); |
211 | crypto_free_cipher(ctx->child); | 211 | crypto_free_cipher(ctx->child); |
212 | } | 212 | } |
213 | 213 | ||
214 | static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) | 214 | static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) |
215 | { | 215 | { |
216 | struct crypto_instance *inst; | 216 | struct crypto_instance *inst; |
217 | struct crypto_alg *alg; | 217 | struct crypto_alg *alg; |
218 | int err; | 218 | int err; |
219 | 219 | ||
220 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 220 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); |
221 | if (err) | 221 | if (err) |
222 | return ERR_PTR(err); | 222 | return ERR_PTR(err); |
223 | 223 | ||
224 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 224 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
225 | CRYPTO_ALG_TYPE_MASK); | 225 | CRYPTO_ALG_TYPE_MASK); |
226 | if (IS_ERR(alg)) | 226 | if (IS_ERR(alg)) |
227 | return ERR_CAST(alg); | 227 | return ERR_CAST(alg); |
228 | 228 | ||
229 | inst = ERR_PTR(-EINVAL); | 229 | inst = ERR_PTR(-EINVAL); |
230 | if (!is_power_of_2(alg->cra_blocksize)) | 230 | if (!is_power_of_2(alg->cra_blocksize)) |
231 | goto out_put_alg; | 231 | goto out_put_alg; |
232 | 232 | ||
233 | inst = crypto_alloc_instance("cbc", alg); | 233 | inst = crypto_alloc_instance("cbc", alg); |
234 | if (IS_ERR(inst)) | 234 | if (IS_ERR(inst)) |
235 | goto out_put_alg; | 235 | goto out_put_alg; |
236 | 236 | ||
237 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 237 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; |
238 | inst->alg.cra_priority = alg->cra_priority; | 238 | inst->alg.cra_priority = alg->cra_priority; |
239 | inst->alg.cra_blocksize = alg->cra_blocksize; | 239 | inst->alg.cra_blocksize = alg->cra_blocksize; |
240 | inst->alg.cra_alignmask = alg->cra_alignmask; | 240 | inst->alg.cra_alignmask = alg->cra_alignmask; |
241 | inst->alg.cra_type = &crypto_blkcipher_type; | 241 | inst->alg.cra_type = &crypto_blkcipher_type; |
242 | 242 | ||
243 | /* We access the data as u32s when xoring. */ | 243 | /* We access the data as u32s when xoring. */ |
244 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; | 244 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; |
245 | 245 | ||
246 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 246 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; |
247 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; | 247 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; |
248 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; | 248 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; |
249 | 249 | ||
250 | inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx); | 250 | inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx); |
251 | 251 | ||
252 | inst->alg.cra_init = crypto_cbc_init_tfm; | 252 | inst->alg.cra_init = crypto_cbc_init_tfm; |
253 | inst->alg.cra_exit = crypto_cbc_exit_tfm; | 253 | inst->alg.cra_exit = crypto_cbc_exit_tfm; |
254 | 254 | ||
255 | inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey; | 255 | inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey; |
256 | inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt; | 256 | inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt; |
257 | inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt; | 257 | inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt; |
258 | 258 | ||
259 | out_put_alg: | 259 | out_put_alg: |
260 | crypto_mod_put(alg); | 260 | crypto_mod_put(alg); |
261 | return inst; | 261 | return inst; |
262 | } | 262 | } |
263 | 263 | ||
264 | static void crypto_cbc_free(struct crypto_instance *inst) | 264 | static void crypto_cbc_free(struct crypto_instance *inst) |
265 | { | 265 | { |
266 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 266 | crypto_drop_spawn(crypto_instance_ctx(inst)); |
267 | kfree(inst); | 267 | kfree(inst); |
268 | } | 268 | } |
269 | 269 | ||
270 | static struct crypto_template crypto_cbc_tmpl = { | 270 | static struct crypto_template crypto_cbc_tmpl = { |
271 | .name = "cbc", | 271 | .name = "cbc", |
272 | .alloc = crypto_cbc_alloc, | 272 | .alloc = crypto_cbc_alloc, |
273 | .free = crypto_cbc_free, | 273 | .free = crypto_cbc_free, |
274 | .module = THIS_MODULE, | 274 | .module = THIS_MODULE, |
275 | }; | 275 | }; |
276 | 276 | ||
277 | static int __init crypto_cbc_module_init(void) | 277 | static int __init crypto_cbc_module_init(void) |
278 | { | 278 | { |
279 | return crypto_register_template(&crypto_cbc_tmpl); | 279 | return crypto_register_template(&crypto_cbc_tmpl); |
280 | } | 280 | } |
281 | 281 | ||
/* Unregister the "cbc" template on module unload. */
static void __exit crypto_cbc_module_exit(void)
{
	crypto_unregister_template(&crypto_cbc_tmpl);
}
286 | 286 | ||
module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher algorithm");
/*
 * "crypto-" prefixed module alias so template lookups (e.g. via AF_ALG)
 * can only auto-load genuine crypto modules.
 */
MODULE_ALIAS_CRYPTO("cbc");
292 | 293 |
crypto/ccm.c
1 | /* | 1 | /* |
2 | * CCM: Counter with CBC-MAC | 2 | * CCM: Counter with CBC-MAC |
3 | * | 3 | * |
4 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> | 4 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <crypto/internal/aead.h> | 13 | #include <crypto/internal/aead.h> |
14 | #include <crypto/internal/skcipher.h> | 14 | #include <crypto/internal/skcipher.h> |
15 | #include <crypto/scatterwalk.h> | 15 | #include <crypto/scatterwalk.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | 21 | ||
22 | #include "internal.h" | 22 | #include "internal.h" |
23 | 23 | ||
/* Per-instance context for a ccm_base(ctr, cipher) template instance. */
struct ccm_instance_ctx {
	struct crypto_skcipher_spawn ctr;	/* CTR-mode skcipher spawn */
	struct crypto_spawn cipher;		/* raw block cipher for the CBC-MAC */
};

/* Per-transform context: the two child transforms CCM combines. */
struct crypto_ccm_ctx {
	struct crypto_cipher *cipher;		/* block cipher used for the MAC */
	struct crypto_ablkcipher *ctr;		/* CTR transform used for en/decryption */
};

/* Context for the RFC 4309 (IPsec ESP) wrapper around CCM. */
struct crypto_rfc4309_ctx {
	struct crypto_aead *child;		/* underlying ccm(...) aead */
	u8 nonce[3];	/* presumably the implicit salt portion of the nonce —
			 * set up outside this view (rfc4309 setkey); confirm */
};

/* Per-request scratch state, located via crypto_ccm_reqctx() (aligned). */
struct crypto_ccm_req_priv_ctx {
	u8 odata[16];		/* running CBC-MAC output block */
	u8 idata[16];		/* partially filled input block (ilen bytes valid) */
	u8 auth_tag[16];	/* received tag on the decrypt path */
	u32 ilen;		/* number of valid bytes in idata */
	u32 flags;		/* request flags cached for crypto_yield() */
	struct scatterlist src[2];	/* tag block chained ahead of req->src */
	struct scatterlist dst[2];	/* tag block chained ahead of req->dst */
	struct ablkcipher_request abreq;	/* child CTR request; sized via
						 * reqsize in init_tfm, keep last */
};
49 | 49 | ||
50 | static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( | 50 | static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( |
51 | struct aead_request *req) | 51 | struct aead_request *req) |
52 | { | 52 | { |
53 | unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); | 53 | unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); |
54 | 54 | ||
55 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); | 55 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); |
56 | } | 56 | } |
57 | 57 | ||
58 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) | 58 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) |
59 | { | 59 | { |
60 | __be32 data; | 60 | __be32 data; |
61 | 61 | ||
62 | memset(block, 0, csize); | 62 | memset(block, 0, csize); |
63 | block += csize; | 63 | block += csize; |
64 | 64 | ||
65 | if (csize >= 4) | 65 | if (csize >= 4) |
66 | csize = 4; | 66 | csize = 4; |
67 | else if (msglen > (1 << (8 * csize))) | 67 | else if (msglen > (1 << (8 * csize))) |
68 | return -EOVERFLOW; | 68 | return -EOVERFLOW; |
69 | 69 | ||
70 | data = cpu_to_be32(msglen); | 70 | data = cpu_to_be32(msglen); |
71 | memcpy(block - csize, (u8 *)&data + 4 - csize, csize); | 71 | memcpy(block - csize, (u8 *)&data + 4 - csize, csize); |
72 | 72 | ||
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
75 | 75 | ||
/*
 * Key both child transforms with the same key: the CTR skcipher used
 * for payload encryption and the raw block cipher used for the CBC-MAC.
 * Request flags are forwarded down; result flags are propagated back up
 * to the aead after each setkey.
 */
static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ablkcipher *ctr = ctx->ctr;
	struct crypto_cipher *tfm = ctx->cipher;
	int err = 0;

	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tfm, key, keylen);
	crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) &
			      CRYPTO_TFM_RES_MASK);

out:
	return err;
}
103 | 103 | ||
104 | static int crypto_ccm_setauthsize(struct crypto_aead *tfm, | 104 | static int crypto_ccm_setauthsize(struct crypto_aead *tfm, |
105 | unsigned int authsize) | 105 | unsigned int authsize) |
106 | { | 106 | { |
107 | switch (authsize) { | 107 | switch (authsize) { |
108 | case 4: | 108 | case 4: |
109 | case 6: | 109 | case 6: |
110 | case 8: | 110 | case 8: |
111 | case 10: | 111 | case 10: |
112 | case 12: | 112 | case 12: |
113 | case 14: | 113 | case 14: |
114 | case 16: | 114 | case 16: |
115 | break; | 115 | break; |
116 | default: | 116 | default: |
117 | return -EINVAL; | 117 | return -EINVAL; |
118 | } | 118 | } |
119 | 119 | ||
120 | return 0; | 120 | return 0; |
121 | } | 121 | } |
122 | 122 | ||
/*
 * Build the B0 block for the CBC-MAC from the request IV per RFC 3610 /
 * NIST SP 800-38C: flags byte (encodes tag length and the Adata bit),
 * nonce, and the message length in the trailing L bytes.
 */
static int format_input(u8 *info, struct aead_request *req,
			unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int lp = req->iv[0];	/* L' = L - 1, carried in iv[0] */
	unsigned int l = lp + 1;
	unsigned int m;

	m = crypto_aead_authsize(aead);

	memcpy(info, req->iv, 16);

	/* format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	*info |= (8 * ((m - 2) / 2));	/* tag length encoded in bits 3..5 */
	if (req->assoclen)
		*info |= 64;		/* Adata flag: associated data present */

	return set_msg_len(info + 16 - l, cryptlen, l);
}
144 | 144 | ||
145 | static int format_adata(u8 *adata, unsigned int a) | 145 | static int format_adata(u8 *adata, unsigned int a) |
146 | { | 146 | { |
147 | int len = 0; | 147 | int len = 0; |
148 | 148 | ||
149 | /* add control info for associated data | 149 | /* add control info for associated data |
150 | * RFC 3610 and NIST Special Publication 800-38C | 150 | * RFC 3610 and NIST Special Publication 800-38C |
151 | */ | 151 | */ |
152 | if (a < 65280) { | 152 | if (a < 65280) { |
153 | *(__be16 *)adata = cpu_to_be16(a); | 153 | *(__be16 *)adata = cpu_to_be16(a); |
154 | len = 2; | 154 | len = 2; |
155 | } else { | 155 | } else { |
156 | *(__be16 *)adata = cpu_to_be16(0xfffe); | 156 | *(__be16 *)adata = cpu_to_be16(0xfffe); |
157 | *(__be32 *)&adata[2] = cpu_to_be32(a); | 157 | *(__be32 *)&adata[2] = cpu_to_be32(a); |
158 | len = 6; | 158 | len = 6; |
159 | } | 159 | } |
160 | 160 | ||
161 | return len; | 161 | return len; |
162 | } | 162 | } |
163 | 163 | ||
/*
 * Fold n bytes of data into the running CBC-MAC held in pctx->odata.
 * A partial block carried over from a previous call (pctx->ilen bytes
 * already in pctx->idata) is completed first; any trailing partial
 * block is stashed back in idata for the next call.
 */
static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n,
			struct crypto_ccm_req_priv_ctx *pctx)
{
	unsigned int bs = 16;
	u8 *odata = pctx->odata;
	u8 *idata = pctx->idata;
	int datalen, getlen;

	datalen = n;

	/* first time in here, block may be partially filled. */
	getlen = bs - pctx->ilen;
	if (datalen >= getlen) {
		/* complete the buffered block and absorb it */
		memcpy(idata + pctx->ilen, data, getlen);
		crypto_xor(odata, idata, bs);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		datalen -= getlen;
		data += getlen;
		pctx->ilen = 0;
	}

	/* now encrypt rest of data */
	while (datalen >= bs) {
		crypto_xor(odata, data, bs);
		crypto_cipher_encrypt_one(tfm, odata, odata);

		datalen -= bs;
		data += bs;
	}

	/* check and see if there's leftover data that wasn't
	 * enough to fill a block.
	 */
	if (datalen) {
		memcpy(idata + pctx->ilen, data, datalen);
		pctx->ilen += datalen;
	}
}
202 | 202 | ||
/*
 * Walk a scatterlist and feed len bytes of it into the CBC-MAC via
 * compute_mac().  Any final partial block is zero-padded and absorbed,
 * so the MAC state in pctx->odata is complete when this returns.
 */
static void get_data_to_compute(struct crypto_cipher *tfm,
				struct crypto_ccm_req_priv_ctx *pctx,
				struct scatterlist *sg, unsigned int len)
{
	struct scatter_walk walk;
	u8 *data_src;
	int n;

	scatterwalk_start(&walk, sg);

	while (len) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* current sg entry exhausted: advance to the next */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		data_src = scatterwalk_map(&walk);

		compute_mac(tfm, data_src, n, pctx);
		len -= n;

		scatterwalk_unmap(data_src);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
		if (len)
			crypto_yield(pctx->flags);	/* allow rescheduling between chunks */
	}

	/* any leftover needs padding and then encrypted */
	if (pctx->ilen) {
		int padlen;
		u8 *odata = pctx->odata;
		u8 *idata = pctx->idata;

		padlen = 16 - pctx->ilen;
		memset(idata + pctx->ilen, 0, padlen);
		crypto_xor(odata, idata, 16);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		pctx->ilen = 0;
	}
}
244 | 244 | ||
/*
 * Compute the CBC-MAC over B0, the formatted associated data, and the
 * plaintext in @plain.  The resulting (not yet encrypted) MAC is left
 * in pctx->odata.  Returns 0 or a negative errno from format_input().
 */
static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
			   unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct crypto_cipher *cipher = ctx->cipher;
	unsigned int assoclen = req->assoclen;
	u8 *odata = pctx->odata;
	u8 *idata = pctx->idata;
	int err;

	/* format control data for input */
	err = format_input(odata, req, cryptlen);
	if (err)
		goto out;

	/* encrypt first block to use as start in computing mac */
	crypto_cipher_encrypt_one(cipher, odata, odata);

	/* format associated data and compute into mac */
	if (assoclen) {
		pctx->ilen = format_adata(idata, assoclen);
		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
	} else {
		pctx->ilen = 0;
	}

	/* compute plaintext into mac */
	if (cryptlen)
		get_data_to_compute(cipher, pctx, plain, cryptlen);

out:
	return err;
}
280 | 280 | ||
281 | static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err) | 281 | static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err) |
282 | { | 282 | { |
283 | struct aead_request *req = areq->data; | 283 | struct aead_request *req = areq->data; |
284 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 284 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
285 | struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); | 285 | struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); |
286 | u8 *odata = pctx->odata; | 286 | u8 *odata = pctx->odata; |
287 | 287 | ||
288 | if (!err) | 288 | if (!err) |
289 | scatterwalk_map_and_copy(odata, req->dst, req->cryptlen, | 289 | scatterwalk_map_and_copy(odata, req->dst, req->cryptlen, |
290 | crypto_aead_authsize(aead), 1); | 290 | crypto_aead_authsize(aead), 1); |
291 | aead_request_complete(req, err); | 291 | aead_request_complete(req, err); |
292 | } | 292 | } |
293 | 293 | ||
294 | static inline int crypto_ccm_check_iv(const u8 *iv) | 294 | static inline int crypto_ccm_check_iv(const u8 *iv) |
295 | { | 295 | { |
296 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ | 296 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ |
297 | if (1 > iv[0] || iv[0] > 7) | 297 | if (1 > iv[0] || iv[0] > 7) |
298 | return -EINVAL; | 298 | return -EINVAL; |
299 | 299 | ||
300 | return 0; | 300 | return 0; |
301 | } | 301 | } |
302 | 302 | ||
/*
 * CCM encryption: CBC-MAC the plaintext, then CTR-encrypt the MAC block
 * followed by the payload, and append the resulting auth tag to dst.
 * On async completion the tag copy happens in crypto_ccm_encrypt_done().
 */
static int crypto_ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int cryptlen = req->cryptlen;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	err = crypto_ccm_auth(req, req->src, cryptlen);
	if (err)
		return err;

	/* Note: rfc 3610 and NIST 800-38C require counter of
	 * zero to encrypt auth tag.
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* chain the MAC block ahead of the payload so it is encrypted
	 * with counter block zero */
	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, odata, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, odata, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
				 crypto_aead_authsize(aead), 1);
	return err;
}
355 | 355 | ||
/*
 * Async completion for the CTR decryption step: recompute the CBC-MAC
 * over the recovered plaintext and compare it (constant-time, via
 * crypto_memneq) against the tag extracted from the ciphertext.
 */
static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
				   int err)
{
	struct aead_request *req = areq->data;
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen - authsize;

	if (!err) {
		err = crypto_ccm_auth(req, req->dst, cryptlen);
		if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
			err = -EBADMSG;	/* tag mismatch: authentication failed */
	}
	aead_request_complete(req, err);
}
372 | 372 | ||
/*
 * CCM decryption: extract the trailing auth tag, CTR-decrypt tag plus
 * payload, recompute the CBC-MAC over the plaintext, and verify it
 * against the received tag (constant-time compare).  Returns -EBADMSG
 * on authentication failure.
 */
static int crypto_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen;
	u8 *authtag = pctx->auth_tag;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	if (cryptlen < authsize)
		return -EINVAL;
	cryptlen -= authsize;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	/* pull the received tag out of the tail of the ciphertext */
	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);

	/* counter block zero decrypts the auth tag */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, authtag, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, authtag, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_decrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_decrypt(abreq);
	if (err)
		return err;

	err = crypto_ccm_auth(req, req->dst, cryptlen);
	if (err)
		return err;

	/* verify */
	if (crypto_memneq(authtag, odata, authsize))
		return -EBADMSG;

	return err;
}
431 | 431 | ||
/*
 * Instantiate the two child transforms (raw block cipher for the MAC,
 * CTR skcipher for en/decryption) and size the per-request context so
 * an aligned crypto_ccm_req_priv_ctx plus the child's ablkcipher
 * request both fit in the aead request context.
 */
static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;
	struct crypto_ablkcipher *ctr;
	unsigned long align;
	int err;

	cipher = crypto_spawn_cipher(&ictx->cipher);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctr = crypto_spawn_skcipher(&ictx->ctr);
	err = PTR_ERR(ctr);
	if (IS_ERR(ctr))
		goto err_free_cipher;

	ctx->cipher = cipher;
	ctx->ctr = ctr;

	/* extra alignment slack beyond the ctx alignment guarantee */
	align = crypto_tfm_alg_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = align +
				sizeof(struct crypto_ccm_req_priv_ctx) +
				crypto_ablkcipher_reqsize(ctr);

	return 0;

err_free_cipher:
	crypto_free_cipher(cipher);
	return err;
}
466 | 466 | ||
467 | static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm) | 467 | static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm) |
468 | { | 468 | { |
469 | struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm); | 469 | struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm); |
470 | 470 | ||
471 | crypto_free_cipher(ctx->cipher); | 471 | crypto_free_cipher(ctx->cipher); |
472 | crypto_free_ablkcipher(ctx->ctr); | 472 | crypto_free_ablkcipher(ctx->ctr); |
473 | } | 473 | } |
474 | 474 | ||
475 | static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, | 475 | static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, |
476 | const char *full_name, | 476 | const char *full_name, |
477 | const char *ctr_name, | 477 | const char *ctr_name, |
478 | const char *cipher_name) | 478 | const char *cipher_name) |
479 | { | 479 | { |
480 | struct crypto_attr_type *algt; | 480 | struct crypto_attr_type *algt; |
481 | struct crypto_instance *inst; | 481 | struct crypto_instance *inst; |
482 | struct crypto_alg *ctr; | 482 | struct crypto_alg *ctr; |
483 | struct crypto_alg *cipher; | 483 | struct crypto_alg *cipher; |
484 | struct ccm_instance_ctx *ictx; | 484 | struct ccm_instance_ctx *ictx; |
485 | int err; | 485 | int err; |
486 | 486 | ||
487 | algt = crypto_get_attr_type(tb); | 487 | algt = crypto_get_attr_type(tb); |
488 | if (IS_ERR(algt)) | 488 | if (IS_ERR(algt)) |
489 | return ERR_CAST(algt); | 489 | return ERR_CAST(algt); |
490 | 490 | ||
491 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 491 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
492 | return ERR_PTR(-EINVAL); | 492 | return ERR_PTR(-EINVAL); |
493 | 493 | ||
494 | cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, | 494 | cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, |
495 | CRYPTO_ALG_TYPE_MASK); | 495 | CRYPTO_ALG_TYPE_MASK); |
496 | if (IS_ERR(cipher)) | 496 | if (IS_ERR(cipher)) |
497 | return ERR_CAST(cipher); | 497 | return ERR_CAST(cipher); |
498 | 498 | ||
499 | err = -EINVAL; | 499 | err = -EINVAL; |
500 | if (cipher->cra_blocksize != 16) | 500 | if (cipher->cra_blocksize != 16) |
501 | goto out_put_cipher; | 501 | goto out_put_cipher; |
502 | 502 | ||
503 | inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); | 503 | inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); |
504 | err = -ENOMEM; | 504 | err = -ENOMEM; |
505 | if (!inst) | 505 | if (!inst) |
506 | goto out_put_cipher; | 506 | goto out_put_cipher; |
507 | 507 | ||
508 | ictx = crypto_instance_ctx(inst); | 508 | ictx = crypto_instance_ctx(inst); |
509 | 509 | ||
510 | err = crypto_init_spawn(&ictx->cipher, cipher, inst, | 510 | err = crypto_init_spawn(&ictx->cipher, cipher, inst, |
511 | CRYPTO_ALG_TYPE_MASK); | 511 | CRYPTO_ALG_TYPE_MASK); |
512 | if (err) | 512 | if (err) |
513 | goto err_free_inst; | 513 | goto err_free_inst; |
514 | 514 | ||
515 | crypto_set_skcipher_spawn(&ictx->ctr, inst); | 515 | crypto_set_skcipher_spawn(&ictx->ctr, inst); |
516 | err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, | 516 | err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, |
517 | crypto_requires_sync(algt->type, | 517 | crypto_requires_sync(algt->type, |
518 | algt->mask)); | 518 | algt->mask)); |
519 | if (err) | 519 | if (err) |
520 | goto err_drop_cipher; | 520 | goto err_drop_cipher; |
521 | 521 | ||
522 | ctr = crypto_skcipher_spawn_alg(&ictx->ctr); | 522 | ctr = crypto_skcipher_spawn_alg(&ictx->ctr); |
523 | 523 | ||
524 | /* Not a stream cipher? */ | 524 | /* Not a stream cipher? */ |
525 | err = -EINVAL; | 525 | err = -EINVAL; |
526 | if (ctr->cra_blocksize != 1) | 526 | if (ctr->cra_blocksize != 1) |
527 | goto err_drop_ctr; | 527 | goto err_drop_ctr; |
528 | 528 | ||
529 | /* We want the real thing! */ | 529 | /* We want the real thing! */ |
530 | if (ctr->cra_ablkcipher.ivsize != 16) | 530 | if (ctr->cra_ablkcipher.ivsize != 16) |
531 | goto err_drop_ctr; | 531 | goto err_drop_ctr; |
532 | 532 | ||
533 | err = -ENAMETOOLONG; | 533 | err = -ENAMETOOLONG; |
534 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 534 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
535 | "ccm_base(%s,%s)", ctr->cra_driver_name, | 535 | "ccm_base(%s,%s)", ctr->cra_driver_name, |
536 | cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 536 | cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
537 | goto err_drop_ctr; | 537 | goto err_drop_ctr; |
538 | 538 | ||
539 | memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); | 539 | memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); |
540 | 540 | ||
541 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 541 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; |
542 | inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; | 542 | inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; |
543 | inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority; | 543 | inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority; |
544 | inst->alg.cra_blocksize = 1; | 544 | inst->alg.cra_blocksize = 1; |
545 | inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask | | 545 | inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask | |
546 | (__alignof__(u32) - 1); | 546 | (__alignof__(u32) - 1); |
547 | inst->alg.cra_type = &crypto_aead_type; | 547 | inst->alg.cra_type = &crypto_aead_type; |
548 | inst->alg.cra_aead.ivsize = 16; | 548 | inst->alg.cra_aead.ivsize = 16; |
549 | inst->alg.cra_aead.maxauthsize = 16; | 549 | inst->alg.cra_aead.maxauthsize = 16; |
550 | inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx); | 550 | inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx); |
551 | inst->alg.cra_init = crypto_ccm_init_tfm; | 551 | inst->alg.cra_init = crypto_ccm_init_tfm; |
552 | inst->alg.cra_exit = crypto_ccm_exit_tfm; | 552 | inst->alg.cra_exit = crypto_ccm_exit_tfm; |
553 | inst->alg.cra_aead.setkey = crypto_ccm_setkey; | 553 | inst->alg.cra_aead.setkey = crypto_ccm_setkey; |
554 | inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize; | 554 | inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize; |
555 | inst->alg.cra_aead.encrypt = crypto_ccm_encrypt; | 555 | inst->alg.cra_aead.encrypt = crypto_ccm_encrypt; |
556 | inst->alg.cra_aead.decrypt = crypto_ccm_decrypt; | 556 | inst->alg.cra_aead.decrypt = crypto_ccm_decrypt; |
557 | 557 | ||
558 | out: | 558 | out: |
559 | crypto_mod_put(cipher); | 559 | crypto_mod_put(cipher); |
560 | return inst; | 560 | return inst; |
561 | 561 | ||
562 | err_drop_ctr: | 562 | err_drop_ctr: |
563 | crypto_drop_skcipher(&ictx->ctr); | 563 | crypto_drop_skcipher(&ictx->ctr); |
564 | err_drop_cipher: | 564 | err_drop_cipher: |
565 | crypto_drop_spawn(&ictx->cipher); | 565 | crypto_drop_spawn(&ictx->cipher); |
566 | err_free_inst: | 566 | err_free_inst: |
567 | kfree(inst); | 567 | kfree(inst); |
568 | out_put_cipher: | 568 | out_put_cipher: |
569 | inst = ERR_PTR(err); | 569 | inst = ERR_PTR(err); |
570 | goto out; | 570 | goto out; |
571 | } | 571 | } |
572 | 572 | ||
/*
 * Instantiate the "ccm" template: ccm(cipher).
 *
 * Derives both the CTR wrapper name "ctr(cipher)" and the instance name
 * "ccm(cipher)" from the single cipher attribute, then hands construction
 * off to crypto_ccm_alloc_common().
 *
 * Returns the new instance or an ERR_PTR on failure (-ENAMETOOLONG when a
 * derived name would exceed CRYPTO_MAX_ALG_NAME).
 */
static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
{
	const char *cipher_name;
	char ctr_name[CRYPTO_MAX_ALG_NAME];
	char full_name[CRYPTO_MAX_ALG_NAME];

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return ERR_CAST(cipher_name);

	/* Build the counter-mode wrapper name for the underlying cipher. */
	if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
}
593 | 593 | ||
594 | static void crypto_ccm_free(struct crypto_instance *inst) | 594 | static void crypto_ccm_free(struct crypto_instance *inst) |
595 | { | 595 | { |
596 | struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst); | 596 | struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst); |
597 | 597 | ||
598 | crypto_drop_spawn(&ctx->cipher); | 598 | crypto_drop_spawn(&ctx->cipher); |
599 | crypto_drop_skcipher(&ctx->ctr); | 599 | crypto_drop_skcipher(&ctx->ctr); |
600 | kfree(inst); | 600 | kfree(inst); |
601 | } | 601 | } |
602 | 602 | ||
/* "ccm" template: builds ccm(cipher) from ctr(cipher) plus the raw cipher. */
static struct crypto_template crypto_ccm_tmpl = {
	.name = "ccm",
	.alloc = crypto_ccm_alloc,
	.free = crypto_ccm_free,
	.module = THIS_MODULE,
};
609 | 609 | ||
/*
 * Instantiate the "ccm_base" template: ccm_base(ctr_alg,cipher_alg).
 *
 * Unlike crypto_ccm_alloc(), the caller names the CTR implementation and
 * the block cipher explicitly (attributes tb[1] and tb[2]); only the
 * combined instance name is synthesized here before delegating to
 * crypto_ccm_alloc_common().
 */
static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
{
	const char *ctr_name;
	const char *cipher_name;
	char full_name[CRYPTO_MAX_ALG_NAME];

	ctr_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(ctr_name))
		return ERR_CAST(ctr_name);

	cipher_name = crypto_attr_alg_name(tb[2]);
	if (IS_ERR(cipher_name))
		return ERR_CAST(cipher_name);

	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
}
630 | 630 | ||
/* "ccm_base" template: caller picks the CTR and cipher drivers directly. */
static struct crypto_template crypto_ccm_base_tmpl = {
	.name = "ccm_base",
	.alloc = crypto_ccm_base_alloc,
	.free = crypto_ccm_free,
	.module = THIS_MODULE,
};
637 | 637 | ||
/*
 * Set the key for an rfc4309(ccm(...)) transform.
 *
 * The supplied key material is the cipher key followed by a 3-byte value
 * that is stored in ctx->nonce (the implicit salt portion of the RFC 4309
 * nonce); only the leading part is passed down to the child AEAD.
 *
 * Request flags are copied parent -> child before the setkey call, and
 * result flags are copied child -> parent afterwards, so callers see the
 * child's error status (e.g. weak-key indications).
 */
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;
	int err;

	/* Must at least contain the 3 trailing nonce/salt bytes. */
	if (keylen < 3)
		return -EINVAL;

	keylen -= 3;
	memcpy(ctx->nonce, key + keylen, 3);

	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
				     CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(child, key, keylen);
	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
				      CRYPTO_TFM_RES_MASK);

	return err;
}
660 | 660 | ||
661 | static int crypto_rfc4309_setauthsize(struct crypto_aead *parent, | 661 | static int crypto_rfc4309_setauthsize(struct crypto_aead *parent, |
662 | unsigned int authsize) | 662 | unsigned int authsize) |
663 | { | 663 | { |
664 | struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); | 664 | struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); |
665 | 665 | ||
666 | switch (authsize) { | 666 | switch (authsize) { |
667 | case 8: | 667 | case 8: |
668 | case 12: | 668 | case 12: |
669 | case 16: | 669 | case 16: |
670 | break; | 670 | break; |
671 | default: | 671 | default: |
672 | return -EINVAL; | 672 | return -EINVAL; |
673 | } | 673 | } |
674 | 674 | ||
675 | return crypto_aead_setauthsize(ctx->child, authsize); | 675 | return crypto_aead_setauthsize(ctx->child, authsize); |
676 | } | 676 | } |
677 | 677 | ||
/*
 * Prepare the sub-request for the inner CCM transform.
 *
 * Builds the full 16-byte CCM IV in the request context:
 *   byte 0     = 3 (the L' length-field encoding),
 *   bytes 1-3  = the salt captured at setkey time (ctx->nonce),
 *   bytes 4-11 = the 8-byte per-request IV supplied by the caller.
 * The IV buffer lives after the child's request context, aligned to the
 * child's alignment mask.
 *
 * Returns the populated sub-request, ready for encrypt/decrypt.
 */
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	/* L' */
	iv[0] = 3;

	memcpy(iv + 1, ctx->nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
	aead_request_set_assoc(subreq, req->assoc, req->assoclen);

	return subreq;
}
701 | 701 | ||
/* Encrypt: build the CCM sub-request, then run the child transform. */
static int crypto_rfc4309_encrypt(struct aead_request *req)
{
	struct aead_request *subreq = crypto_rfc4309_crypt(req);

	return crypto_aead_encrypt(subreq);
}
708 | 708 | ||
/* Decrypt: build the CCM sub-request, then run the child transform. */
static int crypto_rfc4309_decrypt(struct aead_request *req)
{
	struct aead_request *subreq = crypto_rfc4309_crypt(req);

	return crypto_aead_decrypt(subreq);
}
715 | 715 | ||
/*
 * Per-tfm constructor for rfc4309 instances.
 *
 * Instantiates the wrapped CCM transform from the spawn and sizes the
 * request context to hold: our aead_request header, the child's request
 * context (aligned to the tfm context alignment), plus alignment slack and
 * 16 bytes for the full CCM IV assembled in crypto_rfc4309_crypt().
 */
static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *aead;
	unsigned long align;

	aead = crypto_spawn_aead(spawn);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	ctx->child = aead;

	/* Extra slack so the IV can be aligned to the child's mask. */
	align = crypto_aead_alignmask(aead);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				ALIGN(crypto_aead_reqsize(aead),
				      crypto_tfm_ctx_alignment()) +
				align + 16;

	return 0;
}
739 | 739 | ||
740 | static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm) | 740 | static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm) |
741 | { | 741 | { |
742 | struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm); | 742 | struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm); |
743 | 743 | ||
744 | crypto_free_aead(ctx->child); | 744 | crypto_free_aead(ctx->child); |
745 | } | 745 | } |
746 | 746 | ||
/*
 * Instantiate the "rfc4309" template around an existing CCM AEAD.
 *
 * Grabs the named CCM algorithm, verifies it looks like CCM (16-byte IV,
 * stream-cipher blocksize of 1), and builds a nivaead instance exposing an
 * 8-byte per-request IV with "seqiv" as the default IV generator.
 *
 * Error unwinding uses the usual goto ladder: drop the spawn, free the
 * instance, return ERR_PTR(err).
 */
static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct crypto_alg *alg;
	const char *ccm_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	/* Only AEAD instantiations are meaningful for this template. */
	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
		return ERR_PTR(-EINVAL);

	ccm_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(ccm_name))
		return ERR_CAST(ccm_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);
	crypto_set_aead_spawn(spawn, inst);
	err = crypto_grab_aead(spawn, ccm_name, 0,
			       crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	alg = crypto_aead_spawn_alg(spawn);

	err = -EINVAL;

	/* We only support 16-byte blocks. */
	if (alg->cra_aead.ivsize != 16)
		goto out_drop_alg;

	/* Not a stream cipher? */
	if (alg->cra_blocksize != 1)
		goto out_drop_alg;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4309(%s)", alg->cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_drop_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_nivaead_type;

	/* Caller supplies 8 IV bytes; the salt makes up the rest. */
	inst->alg.cra_aead.ivsize = 8;
	inst->alg.cra_aead.maxauthsize = 16;

	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);

	inst->alg.cra_init = crypto_rfc4309_init_tfm;
	inst->alg.cra_exit = crypto_rfc4309_exit_tfm;

	inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
	inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
	inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
	inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;

	inst->alg.cra_aead.geniv = "seqiv";

out:
	return inst;

out_drop_alg:
	crypto_drop_aead(spawn);
out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
830 | 830 | ||
/* Tear down an rfc4309 instance: drop the AEAD spawn and free it. */
static void crypto_rfc4309_free(struct crypto_instance *inst)
{
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);

	crypto_drop_spawn(spawn);
	kfree(inst);
}
836 | 836 | ||
/* "rfc4309" template: CCM with implicit salt for IPsec ESP usage. */
static struct crypto_template crypto_rfc4309_tmpl = {
	.name = "rfc4309",
	.alloc = crypto_rfc4309_alloc,
	.free = crypto_rfc4309_free,
	.module = THIS_MODULE,
};
843 | 843 | ||
/*
 * Module init: register the three templates in dependency order
 * (ccm_base, ccm, rfc4309).  On failure, previously registered templates
 * are unregistered in reverse via the goto ladder.
 */
static int __init crypto_ccm_module_init(void)
{
	int err;

	err = crypto_register_template(&crypto_ccm_base_tmpl);
	if (err)
		goto out;

	err = crypto_register_template(&crypto_ccm_tmpl);
	if (err)
		goto out_undo_base;

	err = crypto_register_template(&crypto_rfc4309_tmpl);
	if (err)
		goto out_undo_ccm;

out:
	return err;

out_undo_ccm:
	crypto_unregister_template(&crypto_ccm_tmpl);
out_undo_base:
	crypto_unregister_template(&crypto_ccm_base_tmpl);
	goto out;
}
869 | 869 | ||
/* Module exit: unregister templates in reverse registration order. */
static void __exit crypto_ccm_module_exit(void)
{
	crypto_unregister_template(&crypto_rfc4309_tmpl);
	crypto_unregister_template(&crypto_ccm_tmpl);
	crypto_unregister_template(&crypto_ccm_base_tmpl);
}
876 | 876 | ||
module_init(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Counter with CBC MAC");
/* Aliases so "crypto-<template>" lookups can autoload this module. */
MODULE_ALIAS_CRYPTO("ccm_base");
MODULE_ALIAS_CRYPTO("rfc4309");
MODULE_ALIAS_CRYPTO("ccm");
884 | 885 |
crypto/chainiv.c
1 | /* | 1 | /* |
2 | * chainiv: Chain IV Generator | 2 | * chainiv: Chain IV Generator |
3 | * | 3 | * |
 * Generate IVs simply by using the last block of the previous encryption.
5 | * This is mainly useful for CBC with a synchronous algorithm. | 5 | * This is mainly useful for CBC with a synchronous algorithm. |
6 | * | 6 | * |
7 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free |
11 | * Software Foundation; either version 2 of the License, or (at your option) | 11 | * Software Foundation; either version 2 of the License, or (at your option) |
12 | * any later version. | 12 | * any later version. |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <crypto/internal/skcipher.h> | 16 | #include <crypto/internal/skcipher.h> |
17 | #include <crypto/rng.h> | 17 | #include <crypto/rng.h> |
18 | #include <crypto/crypto_wq.h> | 18 | #include <crypto/crypto_wq.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/workqueue.h> | 25 | #include <linux/workqueue.h> |
26 | 26 | ||
/* Bit in async_chainiv_ctx::state marking the context as busy. */
enum {
	CHAINIV_STATE_INUSE = 0,
};
30 | 30 | ||
/* Per-tfm state for the synchronous chain IV generator. */
struct chainiv_ctx {
	spinlock_t lock;	/* serializes IV read/update across requests */
	char iv[];		/* IV state carried over from the last request */
};
35 | 35 | ||
/* Per-tfm state for the asynchronous (work-queue backed) variant. */
struct async_chainiv_ctx {
	unsigned long state;		/* holds the CHAINIV_STATE_INUSE bit */

	spinlock_t lock;		/* protects the request queue */
	int err;			/* status handed to schedule_work */

	struct crypto_queue queue;	/* backlog of postponed requests */
	struct work_struct postponed;	/* worker that drains the backlog */

	char iv[];			/* chained IV state */
};
47 | 47 | ||
/*
 * Synchronous givencrypt: under ctx->lock, hand out the stored IV (copied
 * to both req->giv and the sub-request), run the underlying encryption,
 * and save the cipher's updated IV back into ctx->iv so the next request
 * chains from it.  MAY_SLEEP is masked off since we hold a BH spinlock
 * across the encryption.
 */
static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
						~CRYPTO_TFM_REQ_MAY_SLEEP,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	spin_lock_bh(&ctx->lock);

	ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto unlock;

	/* Chain: the updated IV becomes the IV for the next request. */
	memcpy(ctx->iv, subreq->info, ivsize);

unlock:
	spin_unlock_bh(&ctx->lock);

	return err;
}
82 | 82 | ||
/*
 * First-call hook: lazily seed ctx->iv from the default RNG, then swap the
 * givencrypt pointer to the regular chainiv_givencrypt.  The re-check under
 * the lock ensures only one caller performs the seeding when several race
 * on the first request.
 */
static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return chainiv_givencrypt(req);
}
106 | 106 | ||
/*
 * Shared init: size the request context for the child ablkcipher request,
 * then run the generic geniv initialisation.
 */
static int chainiv_init_common(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}
113 | 113 | ||
114 | static int chainiv_init(struct crypto_tfm *tfm) | 114 | static int chainiv_init(struct crypto_tfm *tfm) |
115 | { | 115 | { |
116 | struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); | 116 | struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); |
117 | 117 | ||
118 | spin_lock_init(&ctx->lock); | 118 | spin_lock_init(&ctx->lock); |
119 | 119 | ||
120 | return chainiv_init_common(tfm); | 120 | return chainiv_init_common(tfm); |
121 | } | 121 | } |
122 | 122 | ||
/*
 * Decide whether the postponed worker must run.
 *
 * If the queue is empty, drop the INUSE bit (with a barrier so the queue
 * check below sees enqueues made before the bit was cleared) and re-check:
 * only when new work appeared AND we win the bit back do we schedule.
 * Otherwise the queue is non-empty and the worker is scheduled directly.
 * Returns the error status captured in ctx->err by the caller.
 */
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
	int queued;
	int err = ctx->err;

	if (!ctx->queue.qlen) {
		smp_mb__before_atomic();
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

		if (!ctx->queue.qlen ||
		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
			goto out;
	}

	queued = queue_work(kcrypto_wq, &ctx->postponed);
	BUG_ON(!queued);	/* postponed work must never already be pending here */

out:
	return err;
}
143 | 143 | ||
144 | static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) | 144 | static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) |
145 | { | 145 | { |
146 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 146 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
147 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 147 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
148 | int err; | 148 | int err; |
149 | 149 | ||
150 | spin_lock_bh(&ctx->lock); | 150 | spin_lock_bh(&ctx->lock); |
151 | err = skcipher_enqueue_givcrypt(&ctx->queue, req); | 151 | err = skcipher_enqueue_givcrypt(&ctx->queue, req); |
152 | spin_unlock_bh(&ctx->lock); | 152 | spin_unlock_bh(&ctx->lock); |
153 | 153 | ||
154 | if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) | 154 | if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) |
155 | return err; | 155 | return err; |
156 | 156 | ||
157 | ctx->err = err; | 157 | ctx->err = err; |
158 | return async_chainiv_schedule_work(ctx); | 158 | return async_chainiv_schedule_work(ctx); |
159 | } | 159 | } |
160 | 160 | ||
161 | static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req) | 161 | static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req) |
162 | { | 162 | { |
163 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 163 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
164 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 164 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
165 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); | 165 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); |
166 | unsigned int ivsize = crypto_ablkcipher_ivsize(geniv); | 166 | unsigned int ivsize = crypto_ablkcipher_ivsize(geniv); |
167 | 167 | ||
168 | memcpy(req->giv, ctx->iv, ivsize); | 168 | memcpy(req->giv, ctx->iv, ivsize); |
169 | memcpy(subreq->info, ctx->iv, ivsize); | 169 | memcpy(subreq->info, ctx->iv, ivsize); |
170 | 170 | ||
171 | ctx->err = crypto_ablkcipher_encrypt(subreq); | 171 | ctx->err = crypto_ablkcipher_encrypt(subreq); |
172 | if (ctx->err) | 172 | if (ctx->err) |
173 | goto out; | 173 | goto out; |
174 | 174 | ||
175 | memcpy(ctx->iv, subreq->info, ivsize); | 175 | memcpy(ctx->iv, subreq->info, ivsize); |
176 | 176 | ||
177 | out: | 177 | out: |
178 | return async_chainiv_schedule_work(ctx); | 178 | return async_chainiv_schedule_work(ctx); |
179 | } | 179 | } |
180 | 180 | ||
181 | static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req) | 181 | static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req) |
182 | { | 182 | { |
183 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 183 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
184 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 184 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
185 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); | 185 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); |
186 | 186 | ||
187 | ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); | 187 | ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); |
188 | ablkcipher_request_set_callback(subreq, req->creq.base.flags, | 188 | ablkcipher_request_set_callback(subreq, req->creq.base.flags, |
189 | req->creq.base.complete, | 189 | req->creq.base.complete, |
190 | req->creq.base.data); | 190 | req->creq.base.data); |
191 | ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, | 191 | ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, |
192 | req->creq.nbytes, req->creq.info); | 192 | req->creq.nbytes, req->creq.info); |
193 | 193 | ||
194 | if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) | 194 | if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) |
195 | goto postpone; | 195 | goto postpone; |
196 | 196 | ||
197 | if (ctx->queue.qlen) { | 197 | if (ctx->queue.qlen) { |
198 | clear_bit(CHAINIV_STATE_INUSE, &ctx->state); | 198 | clear_bit(CHAINIV_STATE_INUSE, &ctx->state); |
199 | goto postpone; | 199 | goto postpone; |
200 | } | 200 | } |
201 | 201 | ||
202 | return async_chainiv_givencrypt_tail(req); | 202 | return async_chainiv_givencrypt_tail(req); |
203 | 203 | ||
204 | postpone: | 204 | postpone: |
205 | return async_chainiv_postpone_request(req); | 205 | return async_chainiv_postpone_request(req); |
206 | } | 206 | } |
207 | 207 | ||
208 | static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) | 208 | static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) |
209 | { | 209 | { |
210 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 210 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
211 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 211 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
212 | int err = 0; | 212 | int err = 0; |
213 | 213 | ||
214 | if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) | 214 | if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) |
215 | goto out; | 215 | goto out; |
216 | 216 | ||
217 | if (crypto_ablkcipher_crt(geniv)->givencrypt != | 217 | if (crypto_ablkcipher_crt(geniv)->givencrypt != |
218 | async_chainiv_givencrypt_first) | 218 | async_chainiv_givencrypt_first) |
219 | goto unlock; | 219 | goto unlock; |
220 | 220 | ||
221 | crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt; | 221 | crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt; |
222 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, | 222 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, |
223 | crypto_ablkcipher_ivsize(geniv)); | 223 | crypto_ablkcipher_ivsize(geniv)); |
224 | 224 | ||
225 | unlock: | 225 | unlock: |
226 | clear_bit(CHAINIV_STATE_INUSE, &ctx->state); | 226 | clear_bit(CHAINIV_STATE_INUSE, &ctx->state); |
227 | 227 | ||
228 | if (err) | 228 | if (err) |
229 | return err; | 229 | return err; |
230 | 230 | ||
231 | out: | 231 | out: |
232 | return async_chainiv_givencrypt(req); | 232 | return async_chainiv_givencrypt(req); |
233 | } | 233 | } |
234 | 234 | ||
235 | static void async_chainiv_do_postponed(struct work_struct *work) | 235 | static void async_chainiv_do_postponed(struct work_struct *work) |
236 | { | 236 | { |
237 | struct async_chainiv_ctx *ctx = container_of(work, | 237 | struct async_chainiv_ctx *ctx = container_of(work, |
238 | struct async_chainiv_ctx, | 238 | struct async_chainiv_ctx, |
239 | postponed); | 239 | postponed); |
240 | struct skcipher_givcrypt_request *req; | 240 | struct skcipher_givcrypt_request *req; |
241 | struct ablkcipher_request *subreq; | 241 | struct ablkcipher_request *subreq; |
242 | int err; | 242 | int err; |
243 | 243 | ||
244 | /* Only handle one request at a time to avoid hogging keventd. */ | 244 | /* Only handle one request at a time to avoid hogging keventd. */ |
245 | spin_lock_bh(&ctx->lock); | 245 | spin_lock_bh(&ctx->lock); |
246 | req = skcipher_dequeue_givcrypt(&ctx->queue); | 246 | req = skcipher_dequeue_givcrypt(&ctx->queue); |
247 | spin_unlock_bh(&ctx->lock); | 247 | spin_unlock_bh(&ctx->lock); |
248 | 248 | ||
249 | if (!req) { | 249 | if (!req) { |
250 | async_chainiv_schedule_work(ctx); | 250 | async_chainiv_schedule_work(ctx); |
251 | return; | 251 | return; |
252 | } | 252 | } |
253 | 253 | ||
254 | subreq = skcipher_givcrypt_reqctx(req); | 254 | subreq = skcipher_givcrypt_reqctx(req); |
255 | subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP; | 255 | subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP; |
256 | 256 | ||
257 | err = async_chainiv_givencrypt_tail(req); | 257 | err = async_chainiv_givencrypt_tail(req); |
258 | 258 | ||
259 | local_bh_disable(); | 259 | local_bh_disable(); |
260 | skcipher_givcrypt_complete(req, err); | 260 | skcipher_givcrypt_complete(req, err); |
261 | local_bh_enable(); | 261 | local_bh_enable(); |
262 | } | 262 | } |
263 | 263 | ||
264 | static int async_chainiv_init(struct crypto_tfm *tfm) | 264 | static int async_chainiv_init(struct crypto_tfm *tfm) |
265 | { | 265 | { |
266 | struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); | 266 | struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); |
267 | 267 | ||
268 | spin_lock_init(&ctx->lock); | 268 | spin_lock_init(&ctx->lock); |
269 | 269 | ||
270 | crypto_init_queue(&ctx->queue, 100); | 270 | crypto_init_queue(&ctx->queue, 100); |
271 | INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); | 271 | INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); |
272 | 272 | ||
273 | return chainiv_init_common(tfm); | 273 | return chainiv_init_common(tfm); |
274 | } | 274 | } |
275 | 275 | ||
276 | static void async_chainiv_exit(struct crypto_tfm *tfm) | 276 | static void async_chainiv_exit(struct crypto_tfm *tfm) |
277 | { | 277 | { |
278 | struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); | 278 | struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); |
279 | 279 | ||
280 | BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen); | 280 | BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen); |
281 | 281 | ||
282 | skcipher_geniv_exit(tfm); | 282 | skcipher_geniv_exit(tfm); |
283 | } | 283 | } |
284 | 284 | ||
285 | static struct crypto_template chainiv_tmpl; | 285 | static struct crypto_template chainiv_tmpl; |
286 | 286 | ||
287 | static struct crypto_instance *chainiv_alloc(struct rtattr **tb) | 287 | static struct crypto_instance *chainiv_alloc(struct rtattr **tb) |
288 | { | 288 | { |
289 | struct crypto_attr_type *algt; | 289 | struct crypto_attr_type *algt; |
290 | struct crypto_instance *inst; | 290 | struct crypto_instance *inst; |
291 | int err; | 291 | int err; |
292 | 292 | ||
293 | algt = crypto_get_attr_type(tb); | 293 | algt = crypto_get_attr_type(tb); |
294 | if (IS_ERR(algt)) | 294 | if (IS_ERR(algt)) |
295 | return ERR_CAST(algt); | 295 | return ERR_CAST(algt); |
296 | 296 | ||
297 | err = crypto_get_default_rng(); | 297 | err = crypto_get_default_rng(); |
298 | if (err) | 298 | if (err) |
299 | return ERR_PTR(err); | 299 | return ERR_PTR(err); |
300 | 300 | ||
301 | inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); | 301 | inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); |
302 | if (IS_ERR(inst)) | 302 | if (IS_ERR(inst)) |
303 | goto put_rng; | 303 | goto put_rng; |
304 | 304 | ||
305 | inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first; | 305 | inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first; |
306 | 306 | ||
307 | inst->alg.cra_init = chainiv_init; | 307 | inst->alg.cra_init = chainiv_init; |
308 | inst->alg.cra_exit = skcipher_geniv_exit; | 308 | inst->alg.cra_exit = skcipher_geniv_exit; |
309 | 309 | ||
310 | inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx); | 310 | inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx); |
311 | 311 | ||
312 | if (!crypto_requires_sync(algt->type, algt->mask)) { | 312 | if (!crypto_requires_sync(algt->type, algt->mask)) { |
313 | inst->alg.cra_flags |= CRYPTO_ALG_ASYNC; | 313 | inst->alg.cra_flags |= CRYPTO_ALG_ASYNC; |
314 | 314 | ||
315 | inst->alg.cra_ablkcipher.givencrypt = | 315 | inst->alg.cra_ablkcipher.givencrypt = |
316 | async_chainiv_givencrypt_first; | 316 | async_chainiv_givencrypt_first; |
317 | 317 | ||
318 | inst->alg.cra_init = async_chainiv_init; | 318 | inst->alg.cra_init = async_chainiv_init; |
319 | inst->alg.cra_exit = async_chainiv_exit; | 319 | inst->alg.cra_exit = async_chainiv_exit; |
320 | 320 | ||
321 | inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx); | 321 | inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx); |
322 | } | 322 | } |
323 | 323 | ||
324 | inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; | 324 | inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; |
325 | 325 | ||
326 | out: | 326 | out: |
327 | return inst; | 327 | return inst; |
328 | 328 | ||
329 | put_rng: | 329 | put_rng: |
330 | crypto_put_default_rng(); | 330 | crypto_put_default_rng(); |
331 | goto out; | 331 | goto out; |
332 | } | 332 | } |
333 | 333 | ||
/* Destroy an instance and drop the RNG reference taken at alloc time. */
static void chainiv_free(struct crypto_instance *inst)
{
	skcipher_geniv_free(inst);
	crypto_put_default_rng();
}
339 | 339 | ||
340 | static struct crypto_template chainiv_tmpl = { | 340 | static struct crypto_template chainiv_tmpl = { |
341 | .name = "chainiv", | 341 | .name = "chainiv", |
342 | .alloc = chainiv_alloc, | 342 | .alloc = chainiv_alloc, |
343 | .free = chainiv_free, | 343 | .free = chainiv_free, |
344 | .module = THIS_MODULE, | 344 | .module = THIS_MODULE, |
345 | }; | 345 | }; |
346 | 346 | ||
347 | static int __init chainiv_module_init(void) | 347 | static int __init chainiv_module_init(void) |
348 | { | 348 | { |
349 | return crypto_register_template(&chainiv_tmpl); | 349 | return crypto_register_template(&chainiv_tmpl); |
350 | } | 350 | } |
351 | 351 | ||
352 | static void chainiv_module_exit(void) | 352 | static void chainiv_module_exit(void) |
353 | { | 353 | { |
354 | crypto_unregister_template(&chainiv_tmpl); | 354 | crypto_unregister_template(&chainiv_tmpl); |
355 | } | 355 | } |
356 | 356 | ||
357 | module_init(chainiv_module_init); | 357 | module_init(chainiv_module_init); |
358 | module_exit(chainiv_module_exit); | 358 | module_exit(chainiv_module_exit); |
359 | 359 | ||
360 | MODULE_LICENSE("GPL"); | 360 | MODULE_LICENSE("GPL"); |
361 | MODULE_DESCRIPTION("Chain IV Generator"); | 361 | MODULE_DESCRIPTION("Chain IV Generator"); |
362 | MODULE_ALIAS_CRYPTO("chainiv"); | ||
362 | 363 |
crypto/cmac.c
1 | /* | 1 | /* |
2 | * CMAC: Cipher Block Mode for Authentication | 2 | * CMAC: Cipher Block Mode for Authentication |
3 | * | 3 | * |
4 | * Copyright ยฉ 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> | 4 | * Copyright ยฉ 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> |
5 | * | 5 | * |
6 | * Based on work by: | 6 | * Based on work by: |
7 | * Copyright ยฉ 2013 Tom St Denis <tstdenis@elliptictech.com> | 7 | * Copyright ยฉ 2013 Tom St Denis <tstdenis@elliptictech.com> |
8 | * Based on crypto/xcbc.c: | 8 | * Based on crypto/xcbc.c: |
9 | * Copyright ยฉ 2006 USAGI/WIDE Project, | 9 | * Copyright ยฉ 2006 USAGI/WIDE Project, |
10 | * Author: Kazunori Miyazawa <miyazawa@linux-ipv6.org> | 10 | * Author: Kazunori Miyazawa <miyazawa@linux-ipv6.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License as published by | 13 | * it under the terms of the GNU General Public License as published by |
14 | * the Free Software Foundation; either version 2 of the License, or | 14 | * the Free Software Foundation; either version 2 of the License, or |
15 | * (at your option) any later version. | 15 | * (at your option) any later version. |
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <crypto/internal/hash.h> | 19 | #include <crypto/internal/hash.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * +------------------------ | 25 | * +------------------------ |
26 | * | <parent tfm> | 26 | * | <parent tfm> |
27 | * +------------------------ | 27 | * +------------------------ |
28 | * | cmac_tfm_ctx | 28 | * | cmac_tfm_ctx |
29 | * +------------------------ | 29 | * +------------------------ |
30 | * | consts (block size * 2) | 30 | * | consts (block size * 2) |
31 | * +------------------------ | 31 | * +------------------------ |
32 | */ | 32 | */ |
33 | struct cmac_tfm_ctx { | 33 | struct cmac_tfm_ctx { |
34 | struct crypto_cipher *child; | 34 | struct crypto_cipher *child; |
35 | u8 ctx[]; | 35 | u8 ctx[]; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * +------------------------ | 39 | * +------------------------ |
40 | * | <shash desc> | 40 | * | <shash desc> |
41 | * +------------------------ | 41 | * +------------------------ |
42 | * | cmac_desc_ctx | 42 | * | cmac_desc_ctx |
43 | * +------------------------ | 43 | * +------------------------ |
44 | * | odds (block size) | 44 | * | odds (block size) |
45 | * +------------------------ | 45 | * +------------------------ |
46 | * | prev (block size) | 46 | * | prev (block size) |
47 | * +------------------------ | 47 | * +------------------------ |
48 | */ | 48 | */ |
49 | struct cmac_desc_ctx { | 49 | struct cmac_desc_ctx { |
50 | unsigned int len; | 50 | unsigned int len; |
51 | u8 ctx[]; | 51 | u8 ctx[]; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static int crypto_cmac_digest_setkey(struct crypto_shash *parent, | 54 | static int crypto_cmac_digest_setkey(struct crypto_shash *parent, |
55 | const u8 *inkey, unsigned int keylen) | 55 | const u8 *inkey, unsigned int keylen) |
56 | { | 56 | { |
57 | unsigned long alignmask = crypto_shash_alignmask(parent); | 57 | unsigned long alignmask = crypto_shash_alignmask(parent); |
58 | struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); | 58 | struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); |
59 | unsigned int bs = crypto_shash_blocksize(parent); | 59 | unsigned int bs = crypto_shash_blocksize(parent); |
60 | __be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); | 60 | __be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); |
61 | u64 _const[2]; | 61 | u64 _const[2]; |
62 | int i, err = 0; | 62 | int i, err = 0; |
63 | u8 msb_mask, gfmask; | 63 | u8 msb_mask, gfmask; |
64 | 64 | ||
65 | err = crypto_cipher_setkey(ctx->child, inkey, keylen); | 65 | err = crypto_cipher_setkey(ctx->child, inkey, keylen); |
66 | if (err) | 66 | if (err) |
67 | return err; | 67 | return err; |
68 | 68 | ||
69 | /* encrypt the zero block */ | 69 | /* encrypt the zero block */ |
70 | memset(consts, 0, bs); | 70 | memset(consts, 0, bs); |
71 | crypto_cipher_encrypt_one(ctx->child, (u8 *)consts, (u8 *)consts); | 71 | crypto_cipher_encrypt_one(ctx->child, (u8 *)consts, (u8 *)consts); |
72 | 72 | ||
73 | switch (bs) { | 73 | switch (bs) { |
74 | case 16: | 74 | case 16: |
75 | gfmask = 0x87; | 75 | gfmask = 0x87; |
76 | _const[0] = be64_to_cpu(consts[1]); | 76 | _const[0] = be64_to_cpu(consts[1]); |
77 | _const[1] = be64_to_cpu(consts[0]); | 77 | _const[1] = be64_to_cpu(consts[0]); |
78 | 78 | ||
79 | /* gf(2^128) multiply zero-ciphertext with u and u^2 */ | 79 | /* gf(2^128) multiply zero-ciphertext with u and u^2 */ |
80 | for (i = 0; i < 4; i += 2) { | 80 | for (i = 0; i < 4; i += 2) { |
81 | msb_mask = ((s64)_const[1] >> 63) & gfmask; | 81 | msb_mask = ((s64)_const[1] >> 63) & gfmask; |
82 | _const[1] = (_const[1] << 1) | (_const[0] >> 63); | 82 | _const[1] = (_const[1] << 1) | (_const[0] >> 63); |
83 | _const[0] = (_const[0] << 1) ^ msb_mask; | 83 | _const[0] = (_const[0] << 1) ^ msb_mask; |
84 | 84 | ||
85 | consts[i + 0] = cpu_to_be64(_const[1]); | 85 | consts[i + 0] = cpu_to_be64(_const[1]); |
86 | consts[i + 1] = cpu_to_be64(_const[0]); | 86 | consts[i + 1] = cpu_to_be64(_const[0]); |
87 | } | 87 | } |
88 | 88 | ||
89 | break; | 89 | break; |
90 | case 8: | 90 | case 8: |
91 | gfmask = 0x1B; | 91 | gfmask = 0x1B; |
92 | _const[0] = be64_to_cpu(consts[0]); | 92 | _const[0] = be64_to_cpu(consts[0]); |
93 | 93 | ||
94 | /* gf(2^64) multiply zero-ciphertext with u and u^2 */ | 94 | /* gf(2^64) multiply zero-ciphertext with u and u^2 */ |
95 | for (i = 0; i < 2; i++) { | 95 | for (i = 0; i < 2; i++) { |
96 | msb_mask = ((s64)_const[0] >> 63) & gfmask; | 96 | msb_mask = ((s64)_const[0] >> 63) & gfmask; |
97 | _const[0] = (_const[0] << 1) ^ msb_mask; | 97 | _const[0] = (_const[0] << 1) ^ msb_mask; |
98 | 98 | ||
99 | consts[i] = cpu_to_be64(_const[0]); | 99 | consts[i] = cpu_to_be64(_const[0]); |
100 | } | 100 | } |
101 | 101 | ||
102 | break; | 102 | break; |
103 | } | 103 | } |
104 | 104 | ||
105 | return 0; | 105 | return 0; |
106 | } | 106 | } |
107 | 107 | ||
108 | static int crypto_cmac_digest_init(struct shash_desc *pdesc) | 108 | static int crypto_cmac_digest_init(struct shash_desc *pdesc) |
109 | { | 109 | { |
110 | unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); | 110 | unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); |
111 | struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); | 111 | struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); |
112 | int bs = crypto_shash_blocksize(pdesc->tfm); | 112 | int bs = crypto_shash_blocksize(pdesc->tfm); |
113 | u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs; | 113 | u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs; |
114 | 114 | ||
115 | ctx->len = 0; | 115 | ctx->len = 0; |
116 | memset(prev, 0, bs); | 116 | memset(prev, 0, bs); |
117 | 117 | ||
118 | return 0; | 118 | return 0; |
119 | } | 119 | } |
120 | 120 | ||
121 | static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, | 121 | static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, |
122 | unsigned int len) | 122 | unsigned int len) |
123 | { | 123 | { |
124 | struct crypto_shash *parent = pdesc->tfm; | 124 | struct crypto_shash *parent = pdesc->tfm; |
125 | unsigned long alignmask = crypto_shash_alignmask(parent); | 125 | unsigned long alignmask = crypto_shash_alignmask(parent); |
126 | struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); | 126 | struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); |
127 | struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); | 127 | struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); |
128 | struct crypto_cipher *tfm = tctx->child; | 128 | struct crypto_cipher *tfm = tctx->child; |
129 | int bs = crypto_shash_blocksize(parent); | 129 | int bs = crypto_shash_blocksize(parent); |
130 | u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); | 130 | u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); |
131 | u8 *prev = odds + bs; | 131 | u8 *prev = odds + bs; |
132 | 132 | ||
133 | /* checking the data can fill the block */ | 133 | /* checking the data can fill the block */ |
134 | if ((ctx->len + len) <= bs) { | 134 | if ((ctx->len + len) <= bs) { |
135 | memcpy(odds + ctx->len, p, len); | 135 | memcpy(odds + ctx->len, p, len); |
136 | ctx->len += len; | 136 | ctx->len += len; |
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | /* filling odds with new data and encrypting it */ | 140 | /* filling odds with new data and encrypting it */ |
141 | memcpy(odds + ctx->len, p, bs - ctx->len); | 141 | memcpy(odds + ctx->len, p, bs - ctx->len); |
142 | len -= bs - ctx->len; | 142 | len -= bs - ctx->len; |
143 | p += bs - ctx->len; | 143 | p += bs - ctx->len; |
144 | 144 | ||
145 | crypto_xor(prev, odds, bs); | 145 | crypto_xor(prev, odds, bs); |
146 | crypto_cipher_encrypt_one(tfm, prev, prev); | 146 | crypto_cipher_encrypt_one(tfm, prev, prev); |
147 | 147 | ||
148 | /* clearing the length */ | 148 | /* clearing the length */ |
149 | ctx->len = 0; | 149 | ctx->len = 0; |
150 | 150 | ||
151 | /* encrypting the rest of data */ | 151 | /* encrypting the rest of data */ |
152 | while (len > bs) { | 152 | while (len > bs) { |
153 | crypto_xor(prev, p, bs); | 153 | crypto_xor(prev, p, bs); |
154 | crypto_cipher_encrypt_one(tfm, prev, prev); | 154 | crypto_cipher_encrypt_one(tfm, prev, prev); |
155 | p += bs; | 155 | p += bs; |
156 | len -= bs; | 156 | len -= bs; |
157 | } | 157 | } |
158 | 158 | ||
159 | /* keeping the surplus of blocksize */ | 159 | /* keeping the surplus of blocksize */ |
160 | if (len) { | 160 | if (len) { |
161 | memcpy(odds, p, len); | 161 | memcpy(odds, p, len); |
162 | ctx->len = len; | 162 | ctx->len = len; |
163 | } | 163 | } |
164 | 164 | ||
165 | return 0; | 165 | return 0; |
166 | } | 166 | } |
167 | 167 | ||
168 | static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) | 168 | static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) |
169 | { | 169 | { |
170 | struct crypto_shash *parent = pdesc->tfm; | 170 | struct crypto_shash *parent = pdesc->tfm; |
171 | unsigned long alignmask = crypto_shash_alignmask(parent); | 171 | unsigned long alignmask = crypto_shash_alignmask(parent); |
172 | struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); | 172 | struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); |
173 | struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); | 173 | struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); |
174 | struct crypto_cipher *tfm = tctx->child; | 174 | struct crypto_cipher *tfm = tctx->child; |
175 | int bs = crypto_shash_blocksize(parent); | 175 | int bs = crypto_shash_blocksize(parent); |
176 | u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1); | 176 | u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1); |
177 | u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); | 177 | u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); |
178 | u8 *prev = odds + bs; | 178 | u8 *prev = odds + bs; |
179 | unsigned int offset = 0; | 179 | unsigned int offset = 0; |
180 | 180 | ||
181 | if (ctx->len != bs) { | 181 | if (ctx->len != bs) { |
182 | unsigned int rlen; | 182 | unsigned int rlen; |
183 | u8 *p = odds + ctx->len; | 183 | u8 *p = odds + ctx->len; |
184 | 184 | ||
185 | *p = 0x80; | 185 | *p = 0x80; |
186 | p++; | 186 | p++; |
187 | 187 | ||
188 | rlen = bs - ctx->len - 1; | 188 | rlen = bs - ctx->len - 1; |
189 | if (rlen) | 189 | if (rlen) |
190 | memset(p, 0, rlen); | 190 | memset(p, 0, rlen); |
191 | 191 | ||
192 | offset += bs; | 192 | offset += bs; |
193 | } | 193 | } |
194 | 194 | ||
195 | crypto_xor(prev, odds, bs); | 195 | crypto_xor(prev, odds, bs); |
196 | crypto_xor(prev, consts + offset, bs); | 196 | crypto_xor(prev, consts + offset, bs); |
197 | 197 | ||
198 | crypto_cipher_encrypt_one(tfm, out, prev); | 198 | crypto_cipher_encrypt_one(tfm, out, prev); |
199 | 199 | ||
200 | return 0; | 200 | return 0; |
201 | } | 201 | } |
202 | 202 | ||
203 | static int cmac_init_tfm(struct crypto_tfm *tfm) | 203 | static int cmac_init_tfm(struct crypto_tfm *tfm) |
204 | { | 204 | { |
205 | struct crypto_cipher *cipher; | 205 | struct crypto_cipher *cipher; |
206 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 206 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
207 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 207 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
208 | struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); | 208 | struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
209 | 209 | ||
210 | cipher = crypto_spawn_cipher(spawn); | 210 | cipher = crypto_spawn_cipher(spawn); |
211 | if (IS_ERR(cipher)) | 211 | if (IS_ERR(cipher)) |
212 | return PTR_ERR(cipher); | 212 | return PTR_ERR(cipher); |
213 | 213 | ||
214 | ctx->child = cipher; | 214 | ctx->child = cipher; |
215 | 215 | ||
216 | return 0; | 216 | return 0; |
217 | }; | 217 | }; |
218 | 218 | ||
219 | static void cmac_exit_tfm(struct crypto_tfm *tfm) | 219 | static void cmac_exit_tfm(struct crypto_tfm *tfm) |
220 | { | 220 | { |
221 | struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); | 221 | struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
222 | crypto_free_cipher(ctx->child); | 222 | crypto_free_cipher(ctx->child); |
223 | } | 223 | } |
224 | 224 | ||
225 | static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) | 225 | static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) |
226 | { | 226 | { |
227 | struct shash_instance *inst; | 227 | struct shash_instance *inst; |
228 | struct crypto_alg *alg; | 228 | struct crypto_alg *alg; |
229 | unsigned long alignmask; | 229 | unsigned long alignmask; |
230 | int err; | 230 | int err; |
231 | 231 | ||
232 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); | 232 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
233 | if (err) | 233 | if (err) |
234 | return err; | 234 | return err; |
235 | 235 | ||
236 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 236 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
237 | CRYPTO_ALG_TYPE_MASK); | 237 | CRYPTO_ALG_TYPE_MASK); |
238 | if (IS_ERR(alg)) | 238 | if (IS_ERR(alg)) |
239 | return PTR_ERR(alg); | 239 | return PTR_ERR(alg); |
240 | 240 | ||
241 | switch (alg->cra_blocksize) { | 241 | switch (alg->cra_blocksize) { |
242 | case 16: | 242 | case 16: |
243 | case 8: | 243 | case 8: |
244 | break; | 244 | break; |
245 | default: | 245 | default: |
246 | goto out_put_alg; | 246 | goto out_put_alg; |
247 | } | 247 | } |
248 | 248 | ||
249 | inst = shash_alloc_instance("cmac", alg); | 249 | inst = shash_alloc_instance("cmac", alg); |
250 | err = PTR_ERR(inst); | 250 | err = PTR_ERR(inst); |
251 | if (IS_ERR(inst)) | 251 | if (IS_ERR(inst)) |
252 | goto out_put_alg; | 252 | goto out_put_alg; |
253 | 253 | ||
254 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, | 254 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, |
255 | shash_crypto_instance(inst), | 255 | shash_crypto_instance(inst), |
256 | CRYPTO_ALG_TYPE_MASK); | 256 | CRYPTO_ALG_TYPE_MASK); |
257 | if (err) | 257 | if (err) |
258 | goto out_free_inst; | 258 | goto out_free_inst; |
259 | 259 | ||
260 | alignmask = alg->cra_alignmask | (sizeof(long) - 1); | 260 | alignmask = alg->cra_alignmask | (sizeof(long) - 1); |
261 | inst->alg.base.cra_alignmask = alignmask; | 261 | inst->alg.base.cra_alignmask = alignmask; |
262 | inst->alg.base.cra_priority = alg->cra_priority; | 262 | inst->alg.base.cra_priority = alg->cra_priority; |
263 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | 263 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
264 | 264 | ||
265 | inst->alg.digestsize = alg->cra_blocksize; | 265 | inst->alg.digestsize = alg->cra_blocksize; |
266 | inst->alg.descsize = | 266 | inst->alg.descsize = |
267 | ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment()) | 267 | ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment()) |
268 | + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)) | 268 | + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)) |
269 | + alg->cra_blocksize * 2; | 269 | + alg->cra_blocksize * 2; |
270 | 270 | ||
271 | inst->alg.base.cra_ctxsize = | 271 | inst->alg.base.cra_ctxsize = |
272 | ALIGN(sizeof(struct cmac_tfm_ctx), alignmask + 1) | 272 | ALIGN(sizeof(struct cmac_tfm_ctx), alignmask + 1) |
273 | + alg->cra_blocksize * 2; | 273 | + alg->cra_blocksize * 2; |
274 | 274 | ||
275 | inst->alg.base.cra_init = cmac_init_tfm; | 275 | inst->alg.base.cra_init = cmac_init_tfm; |
276 | inst->alg.base.cra_exit = cmac_exit_tfm; | 276 | inst->alg.base.cra_exit = cmac_exit_tfm; |
277 | 277 | ||
278 | inst->alg.init = crypto_cmac_digest_init; | 278 | inst->alg.init = crypto_cmac_digest_init; |
279 | inst->alg.update = crypto_cmac_digest_update; | 279 | inst->alg.update = crypto_cmac_digest_update; |
280 | inst->alg.final = crypto_cmac_digest_final; | 280 | inst->alg.final = crypto_cmac_digest_final; |
281 | inst->alg.setkey = crypto_cmac_digest_setkey; | 281 | inst->alg.setkey = crypto_cmac_digest_setkey; |
282 | 282 | ||
283 | err = shash_register_instance(tmpl, inst); | 283 | err = shash_register_instance(tmpl, inst); |
284 | if (err) { | 284 | if (err) { |
285 | out_free_inst: | 285 | out_free_inst: |
286 | shash_free_instance(shash_crypto_instance(inst)); | 286 | shash_free_instance(shash_crypto_instance(inst)); |
287 | } | 287 | } |
288 | 288 | ||
289 | out_put_alg: | 289 | out_put_alg: |
290 | crypto_mod_put(alg); | 290 | crypto_mod_put(alg); |
291 | return err; | 291 | return err; |
292 | } | 292 | } |
293 | 293 | ||
/* Template for building "cmac(<cipher>)" instances via cmac_create(). */
static struct crypto_template crypto_cmac_tmpl = {
	.name = "cmac",
	.create = cmac_create,
	.free = shash_free_instance,
	.module = THIS_MODULE,
};
300 | 300 | ||
/* Module entry point: register the "cmac" template with the crypto API. */
static int __init crypto_cmac_module_init(void)
{
	return crypto_register_template(&crypto_cmac_tmpl);
}
305 | 305 | ||
/* Module exit point: unregister the "cmac" template. */
static void __exit crypto_cmac_module_exit(void)
{
	crypto_unregister_template(&crypto_cmac_tmpl);
}
310 | 310 | ||
module_init(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CMAC keyed hash algorithm");
/* Provides the "crypto-cmac" alias so the module can be auto-loaded. */
MODULE_ALIAS_CRYPTO("cmac");
316 | 317 |
crypto/cryptd.c
1 | /* | 1 | /* |
2 | * Software async crypto daemon. | 2 | * Software async crypto daemon. |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * Added AEAD support to cryptd. | 6 | * Added AEAD support to cryptd. |
7 | * Authors: Tadeusz Struk (tadeusz.struk@intel.com) | 7 | * Authors: Tadeusz Struk (tadeusz.struk@intel.com) |
8 | * Adrian Hoban <adrian.hoban@intel.com> | 8 | * Adrian Hoban <adrian.hoban@intel.com> |
9 | * Gabriele Paoloni <gabriele.paoloni@intel.com> | 9 | * Gabriele Paoloni <gabriele.paoloni@intel.com> |
10 | * Aidan O'Mahony (aidan.o.mahony@intel.com) | 10 | * Aidan O'Mahony (aidan.o.mahony@intel.com) |
11 | * Copyright (c) 2010, Intel Corporation. | 11 | * Copyright (c) 2010, Intel Corporation. |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify it | 13 | * This program is free software; you can redistribute it and/or modify it |
14 | * under the terms of the GNU General Public License as published by the Free | 14 | * under the terms of the GNU General Public License as published by the Free |
15 | * Software Foundation; either version 2 of the License, or (at your option) | 15 | * Software Foundation; either version 2 of the License, or (at your option) |
16 | * any later version. | 16 | * any later version. |
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <crypto/algapi.h> | 20 | #include <crypto/algapi.h> |
21 | #include <crypto/internal/hash.h> | 21 | #include <crypto/internal/hash.h> |
22 | #include <crypto/internal/aead.h> | 22 | #include <crypto/internal/aead.h> |
23 | #include <crypto/cryptd.h> | 23 | #include <crypto/cryptd.h> |
24 | #include <crypto/crypto_wq.h> | 24 | #include <crypto/crypto_wq.h> |
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/scatterlist.h> | 30 | #include <linux/scatterlist.h> |
31 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | 33 | ||
/* Maximum number of pending requests held in one per-CPU queue. */
#define CRYPTD_MAX_CPU_QLEN 100

/* One request queue plus the work item that drains it, per CPU. */
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

/* Top-level cryptd queue: an array of per-CPU queues. */
struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

/* Instance context for plain (blkcipher) cryptd instances. */
struct cryptd_instance_ctx {
	struct crypto_spawn spawn;	/* child algorithm */
	struct cryptd_queue *queue;	/* shared request queue */
};

/* Instance context for hash cryptd instances. */
struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

/* Instance context for AEAD cryptd instances. */
struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

/* Per-tfm context: the synchronous child blkcipher doing the real work. */
struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

/* Per-request context: saved completion callback of the caller. */
struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

/* Per-tfm context for hashes: the synchronous child shash. */
struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

/* Per-request hash context: saved callback plus the child's shash_desc. */
struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

/* Per-tfm context for AEAD: the child aead transform. */
struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

/* Per-request AEAD context: saved completion callback. */
struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);
86 | 86 | ||
87 | static int cryptd_init_queue(struct cryptd_queue *queue, | 87 | static int cryptd_init_queue(struct cryptd_queue *queue, |
88 | unsigned int max_cpu_qlen) | 88 | unsigned int max_cpu_qlen) |
89 | { | 89 | { |
90 | int cpu; | 90 | int cpu; |
91 | struct cryptd_cpu_queue *cpu_queue; | 91 | struct cryptd_cpu_queue *cpu_queue; |
92 | 92 | ||
93 | queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); | 93 | queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); |
94 | if (!queue->cpu_queue) | 94 | if (!queue->cpu_queue) |
95 | return -ENOMEM; | 95 | return -ENOMEM; |
96 | for_each_possible_cpu(cpu) { | 96 | for_each_possible_cpu(cpu) { |
97 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 97 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
98 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); | 98 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); |
99 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); | 99 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); |
100 | } | 100 | } |
101 | return 0; | 101 | return 0; |
102 | } | 102 | } |
103 | 103 | ||
104 | static void cryptd_fini_queue(struct cryptd_queue *queue) | 104 | static void cryptd_fini_queue(struct cryptd_queue *queue) |
105 | { | 105 | { |
106 | int cpu; | 106 | int cpu; |
107 | struct cryptd_cpu_queue *cpu_queue; | 107 | struct cryptd_cpu_queue *cpu_queue; |
108 | 108 | ||
109 | for_each_possible_cpu(cpu) { | 109 | for_each_possible_cpu(cpu) { |
110 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 110 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
111 | BUG_ON(cpu_queue->queue.qlen); | 111 | BUG_ON(cpu_queue->queue.qlen); |
112 | } | 112 | } |
113 | free_percpu(queue->cpu_queue); | 113 | free_percpu(queue->cpu_queue); |
114 | } | 114 | } |
115 | 115 | ||
116 | static int cryptd_enqueue_request(struct cryptd_queue *queue, | 116 | static int cryptd_enqueue_request(struct cryptd_queue *queue, |
117 | struct crypto_async_request *request) | 117 | struct crypto_async_request *request) |
118 | { | 118 | { |
119 | int cpu, err; | 119 | int cpu, err; |
120 | struct cryptd_cpu_queue *cpu_queue; | 120 | struct cryptd_cpu_queue *cpu_queue; |
121 | 121 | ||
122 | cpu = get_cpu(); | 122 | cpu = get_cpu(); |
123 | cpu_queue = this_cpu_ptr(queue->cpu_queue); | 123 | cpu_queue = this_cpu_ptr(queue->cpu_queue); |
124 | err = crypto_enqueue_request(&cpu_queue->queue, request); | 124 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
125 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | 125 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
126 | put_cpu(); | 126 | put_cpu(); |
127 | 127 | ||
128 | return err; | 128 | return err; |
129 | } | 129 | } |
130 | 130 | ||
/*
 * Called in workqueue context; performs exactly one queued request (via
 * req->complete) and requeues itself if more work remains.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 * preempt_disable/enable is used to prevent being preempted by
	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
	 * cryptd_enqueue_request() being accessed from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	/* Nothing to do if the queue was already empty. */
	if (!req)
		return;

	/* Tell a backlogged request it is now in progress, then run ours. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	/* More queued work: reschedule ourselves for another single step. */
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}
163 | 163 | ||
164 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) | 164 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) |
165 | { | 165 | { |
166 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 166 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
167 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 167 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
168 | return ictx->queue; | 168 | return ictx->queue; |
169 | } | 169 | } |
170 | 170 | ||
171 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, | 171 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, |
172 | const u8 *key, unsigned int keylen) | 172 | const u8 *key, unsigned int keylen) |
173 | { | 173 | { |
174 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); | 174 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); |
175 | struct crypto_blkcipher *child = ctx->child; | 175 | struct crypto_blkcipher *child = ctx->child; |
176 | int err; | 176 | int err; |
177 | 177 | ||
178 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 178 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
179 | crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & | 179 | crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & |
180 | CRYPTO_TFM_REQ_MASK); | 180 | CRYPTO_TFM_REQ_MASK); |
181 | err = crypto_blkcipher_setkey(child, key, keylen); | 181 | err = crypto_blkcipher_setkey(child, key, keylen); |
182 | crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & | 182 | crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & |
183 | CRYPTO_TFM_RES_MASK); | 183 | CRYPTO_TFM_RES_MASK); |
184 | return err; | 184 | return err; |
185 | } | 185 | } |
186 | 186 | ||
/*
 * Run one blkcipher operation synchronously on the worker thread and report
 * the result through the caller's saved completion callback.
 */
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	/* -EINPROGRESS is only the backlog notification; just forward it. */
	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	/* Restore the caller's completion callback before invoking it. */
	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
216 | 216 | ||
217 | static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) | 217 | static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) |
218 | { | 218 | { |
219 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); | 219 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); |
220 | struct crypto_blkcipher *child = ctx->child; | 220 | struct crypto_blkcipher *child = ctx->child; |
221 | 221 | ||
222 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, | 222 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, |
223 | crypto_blkcipher_crt(child)->encrypt); | 223 | crypto_blkcipher_crt(child)->encrypt); |
224 | } | 224 | } |
225 | 225 | ||
226 | static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) | 226 | static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) |
227 | { | 227 | { |
228 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); | 228 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); |
229 | struct crypto_blkcipher *child = ctx->child; | 229 | struct crypto_blkcipher *child = ctx->child; |
230 | 230 | ||
231 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, | 231 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, |
232 | crypto_blkcipher_crt(child)->decrypt); | 232 | crypto_blkcipher_crt(child)->decrypt); |
233 | } | 233 | } |
234 | 234 | ||
235 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, | 235 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, |
236 | crypto_completion_t compl) | 236 | crypto_completion_t compl) |
237 | { | 237 | { |
238 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); | 238 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); |
239 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 239 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
240 | struct cryptd_queue *queue; | 240 | struct cryptd_queue *queue; |
241 | 241 | ||
242 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); | 242 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); |
243 | rctx->complete = req->base.complete; | 243 | rctx->complete = req->base.complete; |
244 | req->base.complete = compl; | 244 | req->base.complete = compl; |
245 | 245 | ||
246 | return cryptd_enqueue_request(queue, &req->base); | 246 | return cryptd_enqueue_request(queue, &req->base); |
247 | } | 247 | } |
248 | 248 | ||
249 | static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) | 249 | static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) |
250 | { | 250 | { |
251 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); | 251 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); |
252 | } | 252 | } |
253 | 253 | ||
254 | static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) | 254 | static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) |
255 | { | 255 | { |
256 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); | 256 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); |
257 | } | 257 | } |
258 | 258 | ||
259 | static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) | 259 | static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) |
260 | { | 260 | { |
261 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 261 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
262 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 262 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
263 | struct crypto_spawn *spawn = &ictx->spawn; | 263 | struct crypto_spawn *spawn = &ictx->spawn; |
264 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | 264 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
265 | struct crypto_blkcipher *cipher; | 265 | struct crypto_blkcipher *cipher; |
266 | 266 | ||
267 | cipher = crypto_spawn_blkcipher(spawn); | 267 | cipher = crypto_spawn_blkcipher(spawn); |
268 | if (IS_ERR(cipher)) | 268 | if (IS_ERR(cipher)) |
269 | return PTR_ERR(cipher); | 269 | return PTR_ERR(cipher); |
270 | 270 | ||
271 | ctx->child = cipher; | 271 | ctx->child = cipher; |
272 | tfm->crt_ablkcipher.reqsize = | 272 | tfm->crt_ablkcipher.reqsize = |
273 | sizeof(struct cryptd_blkcipher_request_ctx); | 273 | sizeof(struct cryptd_blkcipher_request_ctx); |
274 | return 0; | 274 | return 0; |
275 | } | 275 | } |
276 | 276 | ||
277 | static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) | 277 | static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) |
278 | { | 278 | { |
279 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | 279 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
280 | 280 | ||
281 | crypto_free_blkcipher(ctx->child); | 281 | crypto_free_blkcipher(ctx->child); |
282 | } | 282 | } |
283 | 283 | ||
/*
 * Allocate a crypto instance wrapping @alg, with @head bytes of storage
 * before the instance and @tail bytes after it for per-instance context.
 * The driver name becomes "cryptd(<child driver name>)"; the algorithm
 * name, block size and alignmask are copied from @alg, and the priority
 * is raised by 50.  Returns the base of the allocation (not the instance
 * pointer) or an ERR_PTR on failure.
 */
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	/* The crypto_instance lives @head bytes into the allocation. */
	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}
316 | 316 | ||
/*
 * Build and register a cryptd ablkcipher instance that wraps the
 * synchronous blkcipher named in @tb, dispatching work through @queue.
 * On failure the child algorithm reference and the instance are released.
 */
static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	/* The wrapper presents an asynchronous (ablkcipher) interface. */
	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	/* Key/IV geometry is inherited from the child blkcipher. */
	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		/* Registration failed: undo the spawn, then free below. */
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
373 | 373 | ||
374 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) | 374 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
375 | { | 375 | { |
376 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 376 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
377 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); | 377 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); |
378 | struct crypto_shash_spawn *spawn = &ictx->spawn; | 378 | struct crypto_shash_spawn *spawn = &ictx->spawn; |
379 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 379 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
380 | struct crypto_shash *hash; | 380 | struct crypto_shash *hash; |
381 | 381 | ||
382 | hash = crypto_spawn_shash(spawn); | 382 | hash = crypto_spawn_shash(spawn); |
383 | if (IS_ERR(hash)) | 383 | if (IS_ERR(hash)) |
384 | return PTR_ERR(hash); | 384 | return PTR_ERR(hash); |
385 | 385 | ||
386 | ctx->child = hash; | 386 | ctx->child = hash; |
387 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 387 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
388 | sizeof(struct cryptd_hash_request_ctx) + | 388 | sizeof(struct cryptd_hash_request_ctx) + |
389 | crypto_shash_descsize(hash)); | 389 | crypto_shash_descsize(hash)); |
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
392 | 392 | ||
393 | static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) | 393 | static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) |
394 | { | 394 | { |
395 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 395 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
396 | 396 | ||
397 | crypto_free_shash(ctx->child); | 397 | crypto_free_shash(ctx->child); |
398 | } | 398 | } |
399 | 399 | ||
400 | static int cryptd_hash_setkey(struct crypto_ahash *parent, | 400 | static int cryptd_hash_setkey(struct crypto_ahash *parent, |
401 | const u8 *key, unsigned int keylen) | 401 | const u8 *key, unsigned int keylen) |
402 | { | 402 | { |
403 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | 403 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); |
404 | struct crypto_shash *child = ctx->child; | 404 | struct crypto_shash *child = ctx->child; |
405 | int err; | 405 | int err; |
406 | 406 | ||
407 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 407 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
408 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & | 408 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & |
409 | CRYPTO_TFM_REQ_MASK); | 409 | CRYPTO_TFM_REQ_MASK); |
410 | err = crypto_shash_setkey(child, key, keylen); | 410 | err = crypto_shash_setkey(child, key, keylen); |
411 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & | 411 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & |
412 | CRYPTO_TFM_RES_MASK); | 412 | CRYPTO_TFM_RES_MASK); |
413 | return err; | 413 | return err; |
414 | } | 414 | } |
415 | 415 | ||
416 | static int cryptd_hash_enqueue(struct ahash_request *req, | 416 | static int cryptd_hash_enqueue(struct ahash_request *req, |
417 | crypto_completion_t compl) | 417 | crypto_completion_t compl) |
418 | { | 418 | { |
419 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 419 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
420 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 420 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
421 | struct cryptd_queue *queue = | 421 | struct cryptd_queue *queue = |
422 | cryptd_get_queue(crypto_ahash_tfm(tfm)); | 422 | cryptd_get_queue(crypto_ahash_tfm(tfm)); |
423 | 423 | ||
424 | rctx->complete = req->base.complete; | 424 | rctx->complete = req->base.complete; |
425 | req->base.complete = compl; | 425 | req->base.complete = compl; |
426 | 426 | ||
427 | return cryptd_enqueue_request(queue, &req->base); | 427 | return cryptd_enqueue_request(queue, &req->base); |
428 | } | 428 | } |
429 | 429 | ||
/*
 * Worker callback: initialise the child shash descriptor for a queued
 * ahash init, then report through the caller's saved callback.
 */
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	/* -EINPROGRESS is only the backlog notification; just forward it. */
	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	/* Restore the caller's completion callback before invoking it. */
	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
453 | 453 | ||
454 | static int cryptd_hash_init_enqueue(struct ahash_request *req) | 454 | static int cryptd_hash_init_enqueue(struct ahash_request *req) |
455 | { | 455 | { |
456 | return cryptd_hash_enqueue(req, cryptd_hash_init); | 456 | return cryptd_hash_enqueue(req, cryptd_hash_init); |
457 | } | 457 | } |
458 | 458 | ||
/*
 * Worker-side callback for a queued ahash update: run the update step on
 * the saved shash descriptor, then complete the request with bottom halves
 * disabled.  A -EINPROGRESS @err is forwarded untouched.
 */
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	/* Restore the caller's completion handler. */
	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
478 | 478 | ||
479 | static int cryptd_hash_update_enqueue(struct ahash_request *req) | 479 | static int cryptd_hash_update_enqueue(struct ahash_request *req) |
480 | { | 480 | { |
481 | return cryptd_hash_enqueue(req, cryptd_hash_update); | 481 | return cryptd_hash_enqueue(req, cryptd_hash_update); |
482 | } | 482 | } |
483 | 483 | ||
/*
 * Worker-side callback for a queued ahash final: write the digest into
 * req->result, then complete the request with bottom halves disabled.
 * A -EINPROGRESS @err is forwarded untouched.
 */
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	/* Restore the caller's completion handler. */
	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
501 | 501 | ||
502 | static int cryptd_hash_final_enqueue(struct ahash_request *req) | 502 | static int cryptd_hash_final_enqueue(struct ahash_request *req) |
503 | { | 503 | { |
504 | return cryptd_hash_enqueue(req, cryptd_hash_final); | 504 | return cryptd_hash_enqueue(req, cryptd_hash_final); |
505 | } | 505 | } |
506 | 506 | ||
/*
 * Worker-side callback for a queued ahash finup (update + final in one
 * pass), then complete the request with bottom halves disabled.
 * A -EINPROGRESS @err is forwarded untouched.
 */
static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	/* Restore the caller's completion handler. */
	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
524 | 524 | ||
525 | static int cryptd_hash_finup_enqueue(struct ahash_request *req) | 525 | static int cryptd_hash_finup_enqueue(struct ahash_request *req) |
526 | { | 526 | { |
527 | return cryptd_hash_enqueue(req, cryptd_hash_finup); | 527 | return cryptd_hash_enqueue(req, cryptd_hash_finup); |
528 | } | 528 | } |
529 | 529 | ||
/*
 * Worker-side callback for a queued one-shot digest: bind the shash
 * descriptor to the child transform, run the full digest, then complete
 * the request with bottom halves disabled.  A -EINPROGRESS @err is
 * forwarded untouched.
 */
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;	/* allow the child to sleep */

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
553 | 553 | ||
554 | static int cryptd_hash_digest_enqueue(struct ahash_request *req) | 554 | static int cryptd_hash_digest_enqueue(struct ahash_request *req) |
555 | { | 555 | { |
556 | return cryptd_hash_enqueue(req, cryptd_hash_digest); | 556 | return cryptd_hash_enqueue(req, cryptd_hash_digest); |
557 | } | 557 | } |
558 | 558 | ||
559 | static int cryptd_hash_export(struct ahash_request *req, void *out) | 559 | static int cryptd_hash_export(struct ahash_request *req, void *out) |
560 | { | 560 | { |
561 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 561 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
562 | 562 | ||
563 | return crypto_shash_export(&rctx->desc, out); | 563 | return crypto_shash_export(&rctx->desc, out); |
564 | } | 564 | } |
565 | 565 | ||
566 | static int cryptd_hash_import(struct ahash_request *req, const void *in) | 566 | static int cryptd_hash_import(struct ahash_request *req, const void *in) |
567 | { | 567 | { |
568 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 568 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
569 | 569 | ||
570 | return crypto_shash_import(&rctx->desc, in); | 570 | return crypto_shash_import(&rctx->desc, in); |
571 | } | 571 | } |
572 | 572 | ||
/*
 * Instantiate "cryptd(<shash>)": an asynchronous ahash that defers every
 * operation of the underlying synchronous shash to @queue.
 *
 * Returns 0 on success or a negative errno; on any failure the algorithm
 * reference taken by shash_attr_alg() is dropped before returning.
 */
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	/* tb[1] names the underlying (synchronous) hash algorithm. */
	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	/* All hash operations are queued; export/import run inline. */
	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
		/* NOTE: out_free_inst jumps into this block; both error
		 * paths below free the instance. */
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
629 | 629 | ||
/*
 * Common worker-side helper for AEAD encrypt and decrypt: retarget the
 * request at the @child transform, run @crypt synchronously, restore the
 * caller's completion and invoke it with bottom halves disabled.  A
 * -EINPROGRESS @err (backlog notification) is forwarded without running
 * @crypt.
 */
static void cryptd_aead_crypt(struct aead_request *req,
			struct crypto_aead *child,
			int err,
			int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	rctx = aead_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt( req );
	req->base.complete = rctx->complete;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
648 | 648 | ||
649 | static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) | 649 | static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) |
650 | { | 650 | { |
651 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); | 651 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); |
652 | struct crypto_aead *child = ctx->child; | 652 | struct crypto_aead *child = ctx->child; |
653 | struct aead_request *req; | 653 | struct aead_request *req; |
654 | 654 | ||
655 | req = container_of(areq, struct aead_request, base); | 655 | req = container_of(areq, struct aead_request, base); |
656 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt); | 656 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt); |
657 | } | 657 | } |
658 | 658 | ||
659 | static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) | 659 | static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) |
660 | { | 660 | { |
661 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); | 661 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); |
662 | struct crypto_aead *child = ctx->child; | 662 | struct crypto_aead *child = ctx->child; |
663 | struct aead_request *req; | 663 | struct aead_request *req; |
664 | 664 | ||
665 | req = container_of(areq, struct aead_request, base); | 665 | req = container_of(areq, struct aead_request, base); |
666 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt); | 666 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt); |
667 | } | 667 | } |
668 | 668 | ||
669 | static int cryptd_aead_enqueue(struct aead_request *req, | 669 | static int cryptd_aead_enqueue(struct aead_request *req, |
670 | crypto_completion_t compl) | 670 | crypto_completion_t compl) |
671 | { | 671 | { |
672 | struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); | 672 | struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); |
673 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 673 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
674 | struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); | 674 | struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); |
675 | 675 | ||
676 | rctx->complete = req->base.complete; | 676 | rctx->complete = req->base.complete; |
677 | req->base.complete = compl; | 677 | req->base.complete = compl; |
678 | return cryptd_enqueue_request(queue, &req->base); | 678 | return cryptd_enqueue_request(queue, &req->base); |
679 | } | 679 | } |
680 | 680 | ||
681 | static int cryptd_aead_encrypt_enqueue(struct aead_request *req) | 681 | static int cryptd_aead_encrypt_enqueue(struct aead_request *req) |
682 | { | 682 | { |
683 | return cryptd_aead_enqueue(req, cryptd_aead_encrypt ); | 683 | return cryptd_aead_enqueue(req, cryptd_aead_encrypt ); |
684 | } | 684 | } |
685 | 685 | ||
686 | static int cryptd_aead_decrypt_enqueue(struct aead_request *req) | 686 | static int cryptd_aead_decrypt_enqueue(struct aead_request *req) |
687 | { | 687 | { |
688 | return cryptd_aead_enqueue(req, cryptd_aead_decrypt ); | 688 | return cryptd_aead_enqueue(req, cryptd_aead_decrypt ); |
689 | } | 689 | } |
690 | 690 | ||
/*
 * cra_init for a cryptd AEAD instance: allocate the underlying AEAD
 * transform from the spawn and size the per-request context.
 * Returns 0 or a negative errno from crypto_spawn_aead().
 */
static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	/* Allow the child transform to sleep (it runs from the worker). */
	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
	ctx->child = cipher;
	/* Room for the saved completion pointer in each request. */
	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
	return 0;
}
708 | 708 | ||
709 | static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm) | 709 | static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm) |
710 | { | 710 | { |
711 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 711 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm); |
712 | crypto_free_aead(ctx->child); | 712 | crypto_free_aead(ctx->child); |
713 | } | 713 | } |
714 | 714 | ||
/*
 * Instantiate "cryptd(<aead>)": an asynchronous AEAD whose encrypt and
 * decrypt are deferred to @queue; setkey/setauthsize and the geniv hooks
 * are taken over verbatim from the underlying algorithm.
 *
 * Returns 0 on success or a negative errno; on any failure the reference
 * taken by crypto_get_attr_alg() is dropped before returning.
 */
static int cryptd_create_aead(struct crypto_template *tmpl,
		              struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
				CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
			CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
	inst->alg.cra_init = cryptd_aead_init_tfm;
	inst->alg.cra_exit = cryptd_aead_exit_tfm;
	/* Key handling and IV generation pass straight through. */
	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
	/* Only encrypt/decrypt are actually deferred to the queue. */
	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->aead_spawn.base);
		/* NOTE: out_free_inst jumps into this block; both error
		 * paths below free the instance. */
out_free_inst:
		kfree(inst);
	}
out_put_alg:
	crypto_mod_put(alg);
	return err;
}
767 | 767 | ||
/* The single cryptd work queue shared by every instance created below. */
static struct cryptd_queue queue;
769 | 769 | ||
770 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) | 770 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) |
771 | { | 771 | { |
772 | struct crypto_attr_type *algt; | 772 | struct crypto_attr_type *algt; |
773 | 773 | ||
774 | algt = crypto_get_attr_type(tb); | 774 | algt = crypto_get_attr_type(tb); |
775 | if (IS_ERR(algt)) | 775 | if (IS_ERR(algt)) |
776 | return PTR_ERR(algt); | 776 | return PTR_ERR(algt); |
777 | 777 | ||
778 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 778 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
779 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 779 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
780 | return cryptd_create_blkcipher(tmpl, tb, &queue); | 780 | return cryptd_create_blkcipher(tmpl, tb, &queue); |
781 | case CRYPTO_ALG_TYPE_DIGEST: | 781 | case CRYPTO_ALG_TYPE_DIGEST: |
782 | return cryptd_create_hash(tmpl, tb, &queue); | 782 | return cryptd_create_hash(tmpl, tb, &queue); |
783 | case CRYPTO_ALG_TYPE_AEAD: | 783 | case CRYPTO_ALG_TYPE_AEAD: |
784 | return cryptd_create_aead(tmpl, tb, &queue); | 784 | return cryptd_create_aead(tmpl, tb, &queue); |
785 | } | 785 | } |
786 | 786 | ||
787 | return -EINVAL; | 787 | return -EINVAL; |
788 | } | 788 | } |
789 | 789 | ||
/*
 * Template ->free: drop the type-specific spawn and free the instance.
 * The three ctx pointers all alias the same instance context; only the
 * one matching the algorithm type flags below is meaningful.
 */
static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
		kfree(inst);
		return;
	default:
		/* blkcipher instances */
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}
810 | 810 | ||
/* The "cryptd" template; instances are built by cryptd_create(). */
static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
817 | 817 | ||
/*
 * Allocate a "cryptd(alg_name)" asynchronous blkcipher for in-kernel users.
 *
 * Builds the wrapped algorithm name, forces the blkcipher type/mask bits,
 * allocates the tfm, and rejects any result not provided by this module
 * (i.e. a same-named algorithm from another provider) with -EINVAL.
 */
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	/* Override caller-supplied type bits with the blkcipher type. */
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		/* The name matched some other implementation; refuse it. */
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
842 | 842 | ||
843 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) | 843 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) |
844 | { | 844 | { |
845 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); | 845 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); |
846 | return ctx->child; | 846 | return ctx->child; |
847 | } | 847 | } |
848 | EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); | 848 | EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); |
849 | 849 | ||
/* Free an ablkcipher obtained from cryptd_alloc_ablkcipher(). */
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
855 | 855 | ||
/*
 * Allocate a "cryptd(alg_name)" asynchronous hash for in-kernel users.
 * As with cryptd_alloc_ablkcipher(), a result not provided by this module
 * is rejected with -EINVAL.
 */
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
876 | 876 | ||
877 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) | 877 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) |
878 | { | 878 | { |
879 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); | 879 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); |
880 | 880 | ||
881 | return ctx->child; | 881 | return ctx->child; |
882 | } | 882 | } |
883 | EXPORT_SYMBOL_GPL(cryptd_ahash_child); | 883 | EXPORT_SYMBOL_GPL(cryptd_ahash_child); |
884 | 884 | ||
885 | struct shash_desc *cryptd_shash_desc(struct ahash_request *req) | 885 | struct shash_desc *cryptd_shash_desc(struct ahash_request *req) |
886 | { | 886 | { |
887 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 887 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
888 | return &rctx->desc; | 888 | return &rctx->desc; |
889 | } | 889 | } |
890 | EXPORT_SYMBOL_GPL(cryptd_shash_desc); | 890 | EXPORT_SYMBOL_GPL(cryptd_shash_desc); |
891 | 891 | ||
/* Free an ahash obtained from cryptd_alloc_ahash(). */
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
897 | 897 | ||
/*
 * Allocate a "cryptd(alg_name)" asynchronous AEAD for in-kernel users.
 * As with the other cryptd_alloc_* helpers, a result not provided by this
 * module is rejected with -EINVAL.
 */
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
917 | 917 | ||
918 | struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm) | 918 | struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm) |
919 | { | 919 | { |
920 | struct cryptd_aead_ctx *ctx; | 920 | struct cryptd_aead_ctx *ctx; |
921 | ctx = crypto_aead_ctx(&tfm->base); | 921 | ctx = crypto_aead_ctx(&tfm->base); |
922 | return ctx->child; | 922 | return ctx->child; |
923 | } | 923 | } |
924 | EXPORT_SYMBOL_GPL(cryptd_aead_child); | 924 | EXPORT_SYMBOL_GPL(cryptd_aead_child); |
925 | 925 | ||
/* Free an AEAD obtained from cryptd_alloc_aead(). */
void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
931 | 931 | ||
/*
 * Module init: set up the request queues, then register the "cryptd"
 * template.  The queues are torn down again if registration fails.
 */
static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}
946 | 946 | ||
static void __exit cryptd_exit(void)
{
	/* Drain and free the per-CPU queues before dropping the template. */
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}
952 | 952 | ||
subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
/* Allow auto-loading via the crypto- prefixed template name. */
MODULE_ALIAS_CRYPTO("cryptd");
958 | 959 |
crypto/ctr.c
1 | /* | 1 | /* |
2 | * CTR: Counter mode | 2 | * CTR: Counter mode |
3 | * | 3 | * |
4 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> | 4 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <crypto/algapi.h> | 13 | #include <crypto/algapi.h> |
14 | #include <crypto/ctr.h> | 14 | #include <crypto/ctr.h> |
15 | #include <crypto/internal/skcipher.h> | 15 | #include <crypto/internal/skcipher.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/random.h> | 20 | #include <linux/random.h> |
21 | #include <linux/scatterlist.h> | 21 | #include <linux/scatterlist.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | 23 | ||
/* tfm context for the "ctr" template: the underlying block cipher. */
struct crypto_ctr_ctx {
	struct crypto_cipher *child;
};

/* tfm context for "rfc3686": wrapped CTR transform plus the per-key nonce. */
struct crypto_rfc3686_ctx {
	struct crypto_ablkcipher *child;
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
};

/* Per-request state for rfc3686: assembled counter block and sub-request. */
struct crypto_rfc3686_req_ctx {
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	struct ablkcipher_request subreq CRYPTO_MINALIGN_ATTR;
};
37 | 37 | ||
38 | static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, | 38 | static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, |
39 | unsigned int keylen) | 39 | unsigned int keylen) |
40 | { | 40 | { |
41 | struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent); | 41 | struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent); |
42 | struct crypto_cipher *child = ctx->child; | 42 | struct crypto_cipher *child = ctx->child; |
43 | int err; | 43 | int err; |
44 | 44 | ||
45 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 45 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
46 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 46 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & |
47 | CRYPTO_TFM_REQ_MASK); | 47 | CRYPTO_TFM_REQ_MASK); |
48 | err = crypto_cipher_setkey(child, key, keylen); | 48 | err = crypto_cipher_setkey(child, key, keylen); |
49 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 49 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & |
50 | CRYPTO_TFM_RES_MASK); | 50 | CRYPTO_TFM_RES_MASK); |
51 | 51 | ||
52 | return err; | 52 | return err; |
53 | } | 53 | } |
54 | 54 | ||
55 | static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, | 55 | static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, |
56 | struct crypto_cipher *tfm) | 56 | struct crypto_cipher *tfm) |
57 | { | 57 | { |
58 | unsigned int bsize = crypto_cipher_blocksize(tfm); | 58 | unsigned int bsize = crypto_cipher_blocksize(tfm); |
59 | unsigned long alignmask = crypto_cipher_alignmask(tfm); | 59 | unsigned long alignmask = crypto_cipher_alignmask(tfm); |
60 | u8 *ctrblk = walk->iv; | 60 | u8 *ctrblk = walk->iv; |
61 | u8 tmp[bsize + alignmask]; | 61 | u8 tmp[bsize + alignmask]; |
62 | u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); | 62 | u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); |
63 | u8 *src = walk->src.virt.addr; | 63 | u8 *src = walk->src.virt.addr; |
64 | u8 *dst = walk->dst.virt.addr; | 64 | u8 *dst = walk->dst.virt.addr; |
65 | unsigned int nbytes = walk->nbytes; | 65 | unsigned int nbytes = walk->nbytes; |
66 | 66 | ||
67 | crypto_cipher_encrypt_one(tfm, keystream, ctrblk); | 67 | crypto_cipher_encrypt_one(tfm, keystream, ctrblk); |
68 | crypto_xor(keystream, src, nbytes); | 68 | crypto_xor(keystream, src, nbytes); |
69 | memcpy(dst, keystream, nbytes); | 69 | memcpy(dst, keystream, nbytes); |
70 | 70 | ||
71 | crypto_inc(ctrblk, bsize); | 71 | crypto_inc(ctrblk, bsize); |
72 | } | 72 | } |
73 | 73 | ||
74 | static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, | 74 | static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, |
75 | struct crypto_cipher *tfm) | 75 | struct crypto_cipher *tfm) |
76 | { | 76 | { |
77 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 77 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
78 | crypto_cipher_alg(tfm)->cia_encrypt; | 78 | crypto_cipher_alg(tfm)->cia_encrypt; |
79 | unsigned int bsize = crypto_cipher_blocksize(tfm); | 79 | unsigned int bsize = crypto_cipher_blocksize(tfm); |
80 | u8 *ctrblk = walk->iv; | 80 | u8 *ctrblk = walk->iv; |
81 | u8 *src = walk->src.virt.addr; | 81 | u8 *src = walk->src.virt.addr; |
82 | u8 *dst = walk->dst.virt.addr; | 82 | u8 *dst = walk->dst.virt.addr; |
83 | unsigned int nbytes = walk->nbytes; | 83 | unsigned int nbytes = walk->nbytes; |
84 | 84 | ||
85 | do { | 85 | do { |
86 | /* create keystream */ | 86 | /* create keystream */ |
87 | fn(crypto_cipher_tfm(tfm), dst, ctrblk); | 87 | fn(crypto_cipher_tfm(tfm), dst, ctrblk); |
88 | crypto_xor(dst, src, bsize); | 88 | crypto_xor(dst, src, bsize); |
89 | 89 | ||
90 | /* increment counter in counterblock */ | 90 | /* increment counter in counterblock */ |
91 | crypto_inc(ctrblk, bsize); | 91 | crypto_inc(ctrblk, bsize); |
92 | 92 | ||
93 | src += bsize; | 93 | src += bsize; |
94 | dst += bsize; | 94 | dst += bsize; |
95 | } while ((nbytes -= bsize) >= bsize); | 95 | } while ((nbytes -= bsize) >= bsize); |
96 | 96 | ||
97 | return nbytes; | 97 | return nbytes; |
98 | } | 98 | } |
99 | 99 | ||
100 | static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, | 100 | static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, |
101 | struct crypto_cipher *tfm) | 101 | struct crypto_cipher *tfm) |
102 | { | 102 | { |
103 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 103 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
104 | crypto_cipher_alg(tfm)->cia_encrypt; | 104 | crypto_cipher_alg(tfm)->cia_encrypt; |
105 | unsigned int bsize = crypto_cipher_blocksize(tfm); | 105 | unsigned int bsize = crypto_cipher_blocksize(tfm); |
106 | unsigned long alignmask = crypto_cipher_alignmask(tfm); | 106 | unsigned long alignmask = crypto_cipher_alignmask(tfm); |
107 | unsigned int nbytes = walk->nbytes; | 107 | unsigned int nbytes = walk->nbytes; |
108 | u8 *ctrblk = walk->iv; | 108 | u8 *ctrblk = walk->iv; |
109 | u8 *src = walk->src.virt.addr; | 109 | u8 *src = walk->src.virt.addr; |
110 | u8 tmp[bsize + alignmask]; | 110 | u8 tmp[bsize + alignmask]; |
111 | u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); | 111 | u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); |
112 | 112 | ||
113 | do { | 113 | do { |
114 | /* create keystream */ | 114 | /* create keystream */ |
115 | fn(crypto_cipher_tfm(tfm), keystream, ctrblk); | 115 | fn(crypto_cipher_tfm(tfm), keystream, ctrblk); |
116 | crypto_xor(src, keystream, bsize); | 116 | crypto_xor(src, keystream, bsize); |
117 | 117 | ||
118 | /* increment counter in counterblock */ | 118 | /* increment counter in counterblock */ |
119 | crypto_inc(ctrblk, bsize); | 119 | crypto_inc(ctrblk, bsize); |
120 | 120 | ||
121 | src += bsize; | 121 | src += bsize; |
122 | } while ((nbytes -= bsize) >= bsize); | 122 | } while ((nbytes -= bsize) >= bsize); |
123 | 123 | ||
124 | return nbytes; | 124 | return nbytes; |
125 | } | 125 | } |
126 | 126 | ||
127 | static int crypto_ctr_crypt(struct blkcipher_desc *desc, | 127 | static int crypto_ctr_crypt(struct blkcipher_desc *desc, |
128 | struct scatterlist *dst, struct scatterlist *src, | 128 | struct scatterlist *dst, struct scatterlist *src, |
129 | unsigned int nbytes) | 129 | unsigned int nbytes) |
130 | { | 130 | { |
131 | struct blkcipher_walk walk; | 131 | struct blkcipher_walk walk; |
132 | struct crypto_blkcipher *tfm = desc->tfm; | 132 | struct crypto_blkcipher *tfm = desc->tfm; |
133 | struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm); | 133 | struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm); |
134 | struct crypto_cipher *child = ctx->child; | 134 | struct crypto_cipher *child = ctx->child; |
135 | unsigned int bsize = crypto_cipher_blocksize(child); | 135 | unsigned int bsize = crypto_cipher_blocksize(child); |
136 | int err; | 136 | int err; |
137 | 137 | ||
138 | blkcipher_walk_init(&walk, dst, src, nbytes); | 138 | blkcipher_walk_init(&walk, dst, src, nbytes); |
139 | err = blkcipher_walk_virt_block(desc, &walk, bsize); | 139 | err = blkcipher_walk_virt_block(desc, &walk, bsize); |
140 | 140 | ||
141 | while (walk.nbytes >= bsize) { | 141 | while (walk.nbytes >= bsize) { |
142 | if (walk.src.virt.addr == walk.dst.virt.addr) | 142 | if (walk.src.virt.addr == walk.dst.virt.addr) |
143 | nbytes = crypto_ctr_crypt_inplace(&walk, child); | 143 | nbytes = crypto_ctr_crypt_inplace(&walk, child); |
144 | else | 144 | else |
145 | nbytes = crypto_ctr_crypt_segment(&walk, child); | 145 | nbytes = crypto_ctr_crypt_segment(&walk, child); |
146 | 146 | ||
147 | err = blkcipher_walk_done(desc, &walk, nbytes); | 147 | err = blkcipher_walk_done(desc, &walk, nbytes); |
148 | } | 148 | } |
149 | 149 | ||
150 | if (walk.nbytes) { | 150 | if (walk.nbytes) { |
151 | crypto_ctr_crypt_final(&walk, child); | 151 | crypto_ctr_crypt_final(&walk, child); |
152 | err = blkcipher_walk_done(desc, &walk, 0); | 152 | err = blkcipher_walk_done(desc, &walk, 0); |
153 | } | 153 | } |
154 | 154 | ||
155 | return err; | 155 | return err; |
156 | } | 156 | } |
157 | 157 | ||
158 | static int crypto_ctr_init_tfm(struct crypto_tfm *tfm) | 158 | static int crypto_ctr_init_tfm(struct crypto_tfm *tfm) |
159 | { | 159 | { |
160 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 160 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
161 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 161 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
162 | struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | 162 | struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm); |
163 | struct crypto_cipher *cipher; | 163 | struct crypto_cipher *cipher; |
164 | 164 | ||
165 | cipher = crypto_spawn_cipher(spawn); | 165 | cipher = crypto_spawn_cipher(spawn); |
166 | if (IS_ERR(cipher)) | 166 | if (IS_ERR(cipher)) |
167 | return PTR_ERR(cipher); | 167 | return PTR_ERR(cipher); |
168 | 168 | ||
169 | ctx->child = cipher; | 169 | ctx->child = cipher; |
170 | 170 | ||
171 | return 0; | 171 | return 0; |
172 | } | 172 | } |
173 | 173 | ||
static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

	/* Release the child cipher acquired in crypto_ctr_init_tfm(). */
	crypto_free_cipher(ctx->child);
}
180 | 180 | ||
/*
 * Instantiate "ctr(cipher)".  The wrapped algorithm must be a plain
 * block cipher whose block size is at least 4 and a multiple of 4
 * (crypto_inc() steps the counter in 32-bit words).  The instance
 * itself is a blkcipher with block size 1, since CTR is a stream mode.
 */
static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
			      CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	/* Block size must be >= 4 bytes. */
	err = -EINVAL;
	if (alg->cra_blocksize < 4)
		goto out_put_alg;

	/* If this is false we'd fail the alignment of crypto_inc. */
	if (alg->cra_blocksize % 4)
		goto out_put_alg;

	inst = crypto_alloc_instance("ctr", alg);
	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	/* CTR turns the block cipher into a stream cipher. */
	inst->alg.cra_blocksize = 1;
	/* crypto_inc() additionally needs u32 alignment. */
	inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);

	inst->alg.cra_init = crypto_ctr_init_tfm;
	inst->alg.cra_exit = crypto_ctr_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
	inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;

	inst->alg.cra_blkcipher.geniv = "chainiv";

out:
	/* Drop the reference taken by crypto_attr_alg() on every path. */
	crypto_mod_put(alg);
	return inst;

out_put_alg:
	inst = ERR_PTR(err);
	goto out;
}
238 | 238 | ||
static void crypto_ctr_free(struct crypto_instance *inst)
{
	/* Undo the spawn taken when the instance was allocated. */
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
244 | 244 | ||
/* Template for plain counter mode: "ctr(cipher)". */
static struct crypto_template crypto_ctr_tmpl = {
	.name = "ctr",
	.alloc = crypto_ctr_alloc,
	.free = crypto_ctr_free,
	.module = THIS_MODULE,
};
251 | 251 | ||
252 | static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent, | 252 | static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent, |
253 | const u8 *key, unsigned int keylen) | 253 | const u8 *key, unsigned int keylen) |
254 | { | 254 | { |
255 | struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent); | 255 | struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent); |
256 | struct crypto_ablkcipher *child = ctx->child; | 256 | struct crypto_ablkcipher *child = ctx->child; |
257 | int err; | 257 | int err; |
258 | 258 | ||
259 | /* the nonce is stored in bytes at end of key */ | 259 | /* the nonce is stored in bytes at end of key */ |
260 | if (keylen < CTR_RFC3686_NONCE_SIZE) | 260 | if (keylen < CTR_RFC3686_NONCE_SIZE) |
261 | return -EINVAL; | 261 | return -EINVAL; |
262 | 262 | ||
263 | memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), | 263 | memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), |
264 | CTR_RFC3686_NONCE_SIZE); | 264 | CTR_RFC3686_NONCE_SIZE); |
265 | 265 | ||
266 | keylen -= CTR_RFC3686_NONCE_SIZE; | 266 | keylen -= CTR_RFC3686_NONCE_SIZE; |
267 | 267 | ||
268 | crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 268 | crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
269 | crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & | 269 | crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & |
270 | CRYPTO_TFM_REQ_MASK); | 270 | CRYPTO_TFM_REQ_MASK); |
271 | err = crypto_ablkcipher_setkey(child, key, keylen); | 271 | err = crypto_ablkcipher_setkey(child, key, keylen); |
272 | crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) & | 272 | crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) & |
273 | CRYPTO_TFM_RES_MASK); | 273 | CRYPTO_TFM_RES_MASK); |
274 | 274 | ||
275 | return err; | 275 | return err; |
276 | } | 276 | } |
277 | 277 | ||
/*
 * Process one request.  CTR is its own inverse, so both directions go
 * through the child transform's encrypt path.  Builds the counter block
 * as nonce || IV || be32(1) and forwards the work via a sub-request kept
 * in the request context.
 */
static int crypto_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = ctx->child;
	unsigned long align = crypto_ablkcipher_alignmask(tfm);
	/* reqsize was padded in init_tfm so the ctx can be aligned here */
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1);
	struct ablkcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block: per-key nonce followed by per-request IV */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block (starts at 1) */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	ablkcipher_request_set_tfm(subreq, child);
	ablkcipher_request_set_callback(subreq, req->base.flags,
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
				     iv);

	return crypto_ablkcipher_encrypt(subreq);
}
305 | 305 | ||
306 | static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm) | 306 | static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm) |
307 | { | 307 | { |
308 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 308 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
309 | struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst); | 309 | struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst); |
310 | struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); | 310 | struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); |
311 | struct crypto_ablkcipher *cipher; | 311 | struct crypto_ablkcipher *cipher; |
312 | unsigned long align; | 312 | unsigned long align; |
313 | 313 | ||
314 | cipher = crypto_spawn_skcipher(spawn); | 314 | cipher = crypto_spawn_skcipher(spawn); |
315 | if (IS_ERR(cipher)) | 315 | if (IS_ERR(cipher)) |
316 | return PTR_ERR(cipher); | 316 | return PTR_ERR(cipher); |
317 | 317 | ||
318 | ctx->child = cipher; | 318 | ctx->child = cipher; |
319 | 319 | ||
320 | align = crypto_tfm_alg_alignmask(tfm); | 320 | align = crypto_tfm_alg_alignmask(tfm); |
321 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 321 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
322 | tfm->crt_ablkcipher.reqsize = align + | 322 | tfm->crt_ablkcipher.reqsize = align + |
323 | sizeof(struct crypto_rfc3686_req_ctx) + | 323 | sizeof(struct crypto_rfc3686_req_ctx) + |
324 | crypto_ablkcipher_reqsize(cipher); | 324 | crypto_ablkcipher_reqsize(cipher); |
325 | 325 | ||
326 | return 0; | 326 | return 0; |
327 | } | 327 | } |
328 | 328 | ||
static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);

	/* Release the wrapped CTR transform. */
	crypto_free_ablkcipher(ctx->child);
}
335 | 335 | ||
/*
 * Instantiate "rfc3686(ctr-alg)".  The wrapped algorithm must be a
 * stream-mode skcipher (block size 1) with a 16-byte IV, i.e. a CTR
 * implementation.  The instance exposes an 8-byte IV and grows the key
 * size by the 4-byte nonce per RFC 3686.  Uses goto-based cleanup:
 * spawn is dropped and the instance freed on every error path.
 */
static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	struct crypto_skcipher_spawn *spawn;
	const char *cipher_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask)
		return ERR_PTR(-EINVAL);

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return ERR_CAST(cipher_name);

	/* spawn lives in the tail of the instance allocation */
	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	/* We only support 16-byte blocks. */
	err = -EINVAL;
	if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE)
		goto err_drop_spawn;

	/* Not a stream cipher? */
	if (alg->cra_blocksize != 1)
		goto err_drop_spawn;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)",
		     alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->cra_driver_name) >=
			CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;

	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	/* inherit the child's async flag */
	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			      (alg->cra_flags & CRYPTO_ALG_ASYNC);
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE;
	/* key = child key plus trailing 4-byte nonce */
	inst->alg.cra_ablkcipher.min_keysize =
		alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE;
	inst->alg.cra_ablkcipher.max_keysize =
		alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE;

	inst->alg.cra_ablkcipher.geniv = "seqiv";

	inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey;
	inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt;
	inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt;

	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);

	inst->alg.cra_init = crypto_rfc3686_init_tfm;
	inst->alg.cra_exit = crypto_rfc3686_exit_tfm;

	return inst;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	return ERR_PTR(err);
}
422 | 422 | ||
static void crypto_rfc3686_free(struct crypto_instance *inst)
{
	struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);

	/* Undo crypto_grab_skcipher() from crypto_rfc3686_alloc(). */
	crypto_drop_skcipher(spawn);
	kfree(inst);
}
430 | 430 | ||
/* Template for RFC 3686 counter mode: "rfc3686(ctr-alg)". */
static struct crypto_template crypto_rfc3686_tmpl = {
	.name = "rfc3686",
	.alloc = crypto_rfc3686_alloc,
	.free = crypto_rfc3686_free,
	.module = THIS_MODULE,
};
437 | 437 | ||
438 | static int __init crypto_ctr_module_init(void) | 438 | static int __init crypto_ctr_module_init(void) |
439 | { | 439 | { |
440 | int err; | 440 | int err; |
441 | 441 | ||
442 | err = crypto_register_template(&crypto_ctr_tmpl); | 442 | err = crypto_register_template(&crypto_ctr_tmpl); |
443 | if (err) | 443 | if (err) |
444 | goto out; | 444 | goto out; |
445 | 445 | ||
446 | err = crypto_register_template(&crypto_rfc3686_tmpl); | 446 | err = crypto_register_template(&crypto_rfc3686_tmpl); |
447 | if (err) | 447 | if (err) |
448 | goto out_drop_ctr; | 448 | goto out_drop_ctr; |
449 | 449 | ||
450 | out: | 450 | out: |
451 | return err; | 451 | return err; |
452 | 452 | ||
453 | out_drop_ctr: | 453 | out_drop_ctr: |
454 | crypto_unregister_template(&crypto_ctr_tmpl); | 454 | crypto_unregister_template(&crypto_ctr_tmpl); |
455 | goto out; | 455 | goto out; |
456 | } | 456 | } |
457 | 457 | ||
static void __exit crypto_ctr_module_exit(void)
{
	/* Unregister in reverse order of registration. */
	crypto_unregister_template(&crypto_rfc3686_tmpl);
	crypto_unregister_template(&crypto_ctr_tmpl);
}
463 | 463 | ||
module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR Counter block mode");
/* "crypto-" prefixed aliases let template lookup autoload this module. */
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");
470 | 471 |
crypto/cts.c
1 | /* | 1 | /* |
2 | * CTS: Cipher Text Stealing mode | 2 | * CTS: Cipher Text Stealing mode |
3 | * | 3 | * |
4 | * COPYRIGHT (c) 2008 | 4 | * COPYRIGHT (c) 2008 |
5 | * The Regents of the University of Michigan | 5 | * The Regents of the University of Michigan |
6 | * ALL RIGHTS RESERVED | 6 | * ALL RIGHTS RESERVED |
7 | * | 7 | * |
8 | * Permission is granted to use, copy, create derivative works | 8 | * Permission is granted to use, copy, create derivative works |
9 | * and redistribute this software and such derivative works | 9 | * and redistribute this software and such derivative works |
10 | * for any purpose, so long as the name of The University of | 10 | * for any purpose, so long as the name of The University of |
11 | * Michigan is not used in any advertising or publicity | 11 | * Michigan is not used in any advertising or publicity |
12 | * pertaining to the use of distribution of this software | 12 | * pertaining to the use of distribution of this software |
13 | * without specific, written prior authorization. If the | 13 | * without specific, written prior authorization. If the |
14 | * above copyright notice or any other identification of the | 14 | * above copyright notice or any other identification of the |
15 | * University of Michigan is included in any copy of any | 15 | * University of Michigan is included in any copy of any |
16 | * portion of this software, then the disclaimer below must | 16 | * portion of this software, then the disclaimer below must |
17 | * also be included. | 17 | * also be included. |
18 | * | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION | 19 | * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION |
20 | * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY | 20 | * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY |
21 | * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF | 21 | * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF |
22 | * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING | 22 | * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING |
23 | * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF | 23 | * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF |
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE | 24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE |
25 | * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE | 25 | * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE |
26 | * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR | 26 | * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR |
27 | * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING | 27 | * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING |
28 | * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN | 28 | * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN |
29 | * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF | 29 | * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF |
30 | * SUCH DAMAGES. | 30 | * SUCH DAMAGES. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | /* Derived from various: | 33 | /* Derived from various: |
34 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 34 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
35 | */ | 35 | */ |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * This is the Cipher Text Stealing mode as described by | 38 | * This is the Cipher Text Stealing mode as described by |
39 | * Section 8 of rfc2040 and referenced by rfc3962. | 39 | * Section 8 of rfc2040 and referenced by rfc3962. |
40 | * rfc3962 includes errata information in its Appendix A. | 40 | * rfc3962 includes errata information in its Appendix A. |
41 | */ | 41 | */ |
42 | 42 | ||
43 | #include <crypto/algapi.h> | 43 | #include <crypto/algapi.h> |
44 | #include <linux/err.h> | 44 | #include <linux/err.h> |
45 | #include <linux/init.h> | 45 | #include <linux/init.h> |
46 | #include <linux/kernel.h> | 46 | #include <linux/kernel.h> |
47 | #include <linux/log2.h> | 47 | #include <linux/log2.h> |
48 | #include <linux/module.h> | 48 | #include <linux/module.h> |
49 | #include <linux/scatterlist.h> | 49 | #include <linux/scatterlist.h> |
50 | #include <crypto/scatterwalk.h> | 50 | #include <crypto/scatterwalk.h> |
51 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
52 | 52 | ||
/* Per-tfm context: the underlying CBC blkcipher that does the real work. */
struct crypto_cts_ctx {
	struct crypto_blkcipher *child;
};
56 | 56 | ||
57 | static int crypto_cts_setkey(struct crypto_tfm *parent, const u8 *key, | 57 | static int crypto_cts_setkey(struct crypto_tfm *parent, const u8 *key, |
58 | unsigned int keylen) | 58 | unsigned int keylen) |
59 | { | 59 | { |
60 | struct crypto_cts_ctx *ctx = crypto_tfm_ctx(parent); | 60 | struct crypto_cts_ctx *ctx = crypto_tfm_ctx(parent); |
61 | struct crypto_blkcipher *child = ctx->child; | 61 | struct crypto_blkcipher *child = ctx->child; |
62 | int err; | 62 | int err; |
63 | 63 | ||
64 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 64 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
65 | crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & | 65 | crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & |
66 | CRYPTO_TFM_REQ_MASK); | 66 | CRYPTO_TFM_REQ_MASK); |
67 | err = crypto_blkcipher_setkey(child, key, keylen); | 67 | err = crypto_blkcipher_setkey(child, key, keylen); |
68 | crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & | 68 | crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & |
69 | CRYPTO_TFM_RES_MASK); | 69 | CRYPTO_TFM_RES_MASK); |
70 | return err; | 70 | return err; |
71 | } | 71 | } |
72 | 72 | ||
73 | static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx, | 73 | static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx, |
74 | struct blkcipher_desc *desc, | 74 | struct blkcipher_desc *desc, |
75 | struct scatterlist *dst, | 75 | struct scatterlist *dst, |
76 | struct scatterlist *src, | 76 | struct scatterlist *src, |
77 | unsigned int offset, | 77 | unsigned int offset, |
78 | unsigned int nbytes) | 78 | unsigned int nbytes) |
79 | { | 79 | { |
80 | int bsize = crypto_blkcipher_blocksize(desc->tfm); | 80 | int bsize = crypto_blkcipher_blocksize(desc->tfm); |
81 | u8 tmp[bsize], tmp2[bsize]; | 81 | u8 tmp[bsize], tmp2[bsize]; |
82 | struct blkcipher_desc lcldesc; | 82 | struct blkcipher_desc lcldesc; |
83 | struct scatterlist sgsrc[1], sgdst[1]; | 83 | struct scatterlist sgsrc[1], sgdst[1]; |
84 | int lastn = nbytes - bsize; | 84 | int lastn = nbytes - bsize; |
85 | u8 iv[bsize]; | 85 | u8 iv[bsize]; |
86 | u8 s[bsize * 2], d[bsize * 2]; | 86 | u8 s[bsize * 2], d[bsize * 2]; |
87 | int err; | 87 | int err; |
88 | 88 | ||
89 | if (lastn < 0) | 89 | if (lastn < 0) |
90 | return -EINVAL; | 90 | return -EINVAL; |
91 | 91 | ||
92 | sg_init_table(sgsrc, 1); | 92 | sg_init_table(sgsrc, 1); |
93 | sg_init_table(sgdst, 1); | 93 | sg_init_table(sgdst, 1); |
94 | 94 | ||
95 | memset(s, 0, sizeof(s)); | 95 | memset(s, 0, sizeof(s)); |
96 | scatterwalk_map_and_copy(s, src, offset, nbytes, 0); | 96 | scatterwalk_map_and_copy(s, src, offset, nbytes, 0); |
97 | 97 | ||
98 | memcpy(iv, desc->info, bsize); | 98 | memcpy(iv, desc->info, bsize); |
99 | 99 | ||
100 | lcldesc.tfm = ctx->child; | 100 | lcldesc.tfm = ctx->child; |
101 | lcldesc.info = iv; | 101 | lcldesc.info = iv; |
102 | lcldesc.flags = desc->flags; | 102 | lcldesc.flags = desc->flags; |
103 | 103 | ||
104 | sg_set_buf(&sgsrc[0], s, bsize); | 104 | sg_set_buf(&sgsrc[0], s, bsize); |
105 | sg_set_buf(&sgdst[0], tmp, bsize); | 105 | sg_set_buf(&sgdst[0], tmp, bsize); |
106 | err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); | 106 | err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); |
107 | 107 | ||
108 | memcpy(d + bsize, tmp, lastn); | 108 | memcpy(d + bsize, tmp, lastn); |
109 | 109 | ||
110 | lcldesc.info = tmp; | 110 | lcldesc.info = tmp; |
111 | 111 | ||
112 | sg_set_buf(&sgsrc[0], s + bsize, bsize); | 112 | sg_set_buf(&sgsrc[0], s + bsize, bsize); |
113 | sg_set_buf(&sgdst[0], tmp2, bsize); | 113 | sg_set_buf(&sgdst[0], tmp2, bsize); |
114 | err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); | 114 | err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); |
115 | 115 | ||
116 | memcpy(d, tmp2, bsize); | 116 | memcpy(d, tmp2, bsize); |
117 | 117 | ||
118 | scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); | 118 | scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); |
119 | 119 | ||
120 | memcpy(desc->info, tmp2, bsize); | 120 | memcpy(desc->info, tmp2, bsize); |
121 | 121 | ||
122 | return err; | 122 | return err; |
123 | } | 123 | } |
124 | 124 | ||
125 | static int crypto_cts_encrypt(struct blkcipher_desc *desc, | 125 | static int crypto_cts_encrypt(struct blkcipher_desc *desc, |
126 | struct scatterlist *dst, struct scatterlist *src, | 126 | struct scatterlist *dst, struct scatterlist *src, |
127 | unsigned int nbytes) | 127 | unsigned int nbytes) |
128 | { | 128 | { |
129 | struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 129 | struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); |
130 | int bsize = crypto_blkcipher_blocksize(desc->tfm); | 130 | int bsize = crypto_blkcipher_blocksize(desc->tfm); |
131 | int tot_blocks = (nbytes + bsize - 1) / bsize; | 131 | int tot_blocks = (nbytes + bsize - 1) / bsize; |
132 | int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; | 132 | int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; |
133 | struct blkcipher_desc lcldesc; | 133 | struct blkcipher_desc lcldesc; |
134 | int err; | 134 | int err; |
135 | 135 | ||
136 | lcldesc.tfm = ctx->child; | 136 | lcldesc.tfm = ctx->child; |
137 | lcldesc.info = desc->info; | 137 | lcldesc.info = desc->info; |
138 | lcldesc.flags = desc->flags; | 138 | lcldesc.flags = desc->flags; |
139 | 139 | ||
140 | if (tot_blocks == 1) { | 140 | if (tot_blocks == 1) { |
141 | err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize); | 141 | err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize); |
142 | } else if (nbytes <= bsize * 2) { | 142 | } else if (nbytes <= bsize * 2) { |
143 | err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes); | 143 | err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes); |
144 | } else { | 144 | } else { |
145 | /* do normal function for tot_blocks - 2 */ | 145 | /* do normal function for tot_blocks - 2 */ |
146 | err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, | 146 | err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, |
147 | cbc_blocks * bsize); | 147 | cbc_blocks * bsize); |
148 | if (err == 0) { | 148 | if (err == 0) { |
149 | /* do cts for final two blocks */ | 149 | /* do cts for final two blocks */ |
150 | err = cts_cbc_encrypt(ctx, desc, dst, src, | 150 | err = cts_cbc_encrypt(ctx, desc, dst, src, |
151 | cbc_blocks * bsize, | 151 | cbc_blocks * bsize, |
152 | nbytes - (cbc_blocks * bsize)); | 152 | nbytes - (cbc_blocks * bsize)); |
153 | } | 153 | } |
154 | } | 154 | } |
155 | 155 | ||
156 | return err; | 156 | return err; |
157 | } | 157 | } |
158 | 158 | ||
/*
 * Invert cipher-text stealing on the final one or two (possibly
 * partial) blocks (RFC 2040 section 8 / RFC 3962).  @offset/@nbytes
 * select the tail of @src; the recovered plaintext is written to @dst
 * at @offset and desc->info is updated with the new chaining value.
 * Returns 0 or a negative errno from the underlying cipher.
 */
static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst,
			   struct scatterlist *src,
			   unsigned int offset,
			   unsigned int nbytes)
{
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	u8 tmp[bsize];
	struct blkcipher_desc lcldesc;
	struct scatterlist sgsrc[1], sgdst[1];
	int lastn = nbytes - bsize;	/* bytes in the final, short block */
	u8 iv[bsize];
	u8 s[bsize * 2], d[bsize * 2];
	int err;

	/* Need at least one full block ahead of the stolen tail. */
	if (lastn < 0)
		return -EINVAL;

	sg_init_table(sgsrc, 1);
	sg_init_table(sgdst, 1);

	scatterwalk_map_and_copy(s, src, offset, nbytes, 0);

	lcldesc.tfm = ctx->child;
	lcldesc.info = iv;
	lcldesc.flags = desc->flags;

	/* 1. Decrypt Cn-1 (s) to create Dn (tmp)*/
	/* zero IV so the raw block-cipher output lands in tmp */
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s, bsize);
	sg_set_buf(&sgdst[0], tmp, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
	if (err)
		return err;
	/* 2. Pad Cn with zeros at the end to create C of length BB */
	memset(iv, 0, sizeof(iv));
	memcpy(iv, s + bsize, lastn);
	/* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */
	crypto_xor(tmp, iv, bsize);
	/* 4. Select the first Ln bytes of Xn (tmp) to create Pn */
	memcpy(d + bsize, tmp, lastn);

	/* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
	memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
	/* 6. Decrypt En to create Pn-1 */
	/* memzero_explicit: scrub key-derived data; plain memset may be
	 * optimized away */
	memzero_explicit(iv, sizeof(iv));

	sg_set_buf(&sgsrc[0], s + bsize, bsize);
	sg_set_buf(&sgdst[0], d, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	/* XOR with previous block */
	crypto_xor(d, desc->info, bsize);

	scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);

	/* propagate Cn-1 as the chaining value for any follow-on request */
	memcpy(desc->info, s, bsize);
	return err;
}
219 | 219 | ||
220 | static int crypto_cts_decrypt(struct blkcipher_desc *desc, | 220 | static int crypto_cts_decrypt(struct blkcipher_desc *desc, |
221 | struct scatterlist *dst, struct scatterlist *src, | 221 | struct scatterlist *dst, struct scatterlist *src, |
222 | unsigned int nbytes) | 222 | unsigned int nbytes) |
223 | { | 223 | { |
224 | struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 224 | struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); |
225 | int bsize = crypto_blkcipher_blocksize(desc->tfm); | 225 | int bsize = crypto_blkcipher_blocksize(desc->tfm); |
226 | int tot_blocks = (nbytes + bsize - 1) / bsize; | 226 | int tot_blocks = (nbytes + bsize - 1) / bsize; |
227 | int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; | 227 | int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; |
228 | struct blkcipher_desc lcldesc; | 228 | struct blkcipher_desc lcldesc; |
229 | int err; | 229 | int err; |
230 | 230 | ||
231 | lcldesc.tfm = ctx->child; | 231 | lcldesc.tfm = ctx->child; |
232 | lcldesc.info = desc->info; | 232 | lcldesc.info = desc->info; |
233 | lcldesc.flags = desc->flags; | 233 | lcldesc.flags = desc->flags; |
234 | 234 | ||
235 | if (tot_blocks == 1) { | 235 | if (tot_blocks == 1) { |
236 | err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize); | 236 | err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize); |
237 | } else if (nbytes <= bsize * 2) { | 237 | } else if (nbytes <= bsize * 2) { |
238 | err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes); | 238 | err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes); |
239 | } else { | 239 | } else { |
240 | /* do normal function for tot_blocks - 2 */ | 240 | /* do normal function for tot_blocks - 2 */ |
241 | err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, | 241 | err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, |
242 | cbc_blocks * bsize); | 242 | cbc_blocks * bsize); |
243 | if (err == 0) { | 243 | if (err == 0) { |
244 | /* do cts for final two blocks */ | 244 | /* do cts for final two blocks */ |
245 | err = cts_cbc_decrypt(ctx, desc, dst, src, | 245 | err = cts_cbc_decrypt(ctx, desc, dst, src, |
246 | cbc_blocks * bsize, | 246 | cbc_blocks * bsize, |
247 | nbytes - (cbc_blocks * bsize)); | 247 | nbytes - (cbc_blocks * bsize)); |
248 | } | 248 | } |
249 | } | 249 | } |
250 | return err; | 250 | return err; |
251 | } | 251 | } |
252 | 252 | ||
253 | static int crypto_cts_init_tfm(struct crypto_tfm *tfm) | 253 | static int crypto_cts_init_tfm(struct crypto_tfm *tfm) |
254 | { | 254 | { |
255 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 255 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
256 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 256 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
257 | struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); | 257 | struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); |
258 | struct crypto_blkcipher *cipher; | 258 | struct crypto_blkcipher *cipher; |
259 | 259 | ||
260 | cipher = crypto_spawn_blkcipher(spawn); | 260 | cipher = crypto_spawn_blkcipher(spawn); |
261 | if (IS_ERR(cipher)) | 261 | if (IS_ERR(cipher)) |
262 | return PTR_ERR(cipher); | 262 | return PTR_ERR(cipher); |
263 | 263 | ||
264 | ctx->child = cipher; | 264 | ctx->child = cipher; |
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | 267 | ||
268 | static void crypto_cts_exit_tfm(struct crypto_tfm *tfm) | 268 | static void crypto_cts_exit_tfm(struct crypto_tfm *tfm) |
269 | { | 269 | { |
270 | struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); | 270 | struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); |
271 | crypto_free_blkcipher(ctx->child); | 271 | crypto_free_blkcipher(ctx->child); |
272 | } | 272 | } |
273 | 273 | ||
/*
 * Template .alloc callback: build a "cts(<alg>)" blkcipher instance
 * around the block cipher named in @tb.  Returns the new instance or an
 * ERR_PTR; the reference taken on @alg is always dropped before return.
 */
static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER,
			      CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	/* CTS buffer arithmetic assumes a power-of-2 block size. */
	inst = ERR_PTR(-EINVAL);
	if (!is_power_of_2(alg->cra_blocksize))
		goto out_put_alg;

	inst = crypto_alloc_instance("cts", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	/* Inherit the underlying cipher's parameters for the wrapper. */
	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	/* We access the data as u32s when xoring. */
	inst->alg.cra_alignmask |= __alignof__(u32) - 1;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_blkcipher.geniv = "seqiv";

	inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx);

	inst->alg.cra_init = crypto_cts_init_tfm;
	inst->alg.cra_exit = crypto_cts_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_cts_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_cts_encrypt;
	inst->alg.cra_blkcipher.decrypt = crypto_cts_decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}
325 | 325 | ||
/* Template .free callback: drop the spawn and release the instance. */
static void crypto_cts_free(struct crypto_instance *inst)
{
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);

	crypto_drop_spawn(spawn);
	kfree(inst);
}
331 | 331 | ||
/* Template for "cts(<cbc mode>)" cipher-text stealing instances. */
static struct crypto_template crypto_cts_tmpl = {
	.name = "cts",
	.alloc = crypto_cts_alloc,
	.free = crypto_cts_free,
	.module = THIS_MODULE,
};
338 | 338 | ||
/* Register the "cts" template; returns 0 or a negative errno. */
static int __init crypto_cts_module_init(void)
{
	return crypto_register_template(&crypto_cts_tmpl);
}
343 | 343 | ||
/* Unregister the "cts" template on module unload. */
static void __exit crypto_cts_module_exit(void)
{
	crypto_unregister_template(&crypto_cts_tmpl);
}
348 | 348 | ||
module_init(crypto_cts_module_init);
module_exit(crypto_cts_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
/* "crypto-cts" alias lets template lookup autoload this module. */
MODULE_ALIAS_CRYPTO("cts");
354 | 355 |
crypto/ecb.c
1 | /* | 1 | /* |
2 | * ECB: Electronic CodeBook mode | 2 | * ECB: Electronic CodeBook mode |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <crypto/algapi.h> | 13 | #include <crypto/algapi.h> |
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/scatterlist.h> | 18 | #include <linux/scatterlist.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | 20 | ||
/* Per-tfm context: the underlying single-block cipher being wrapped. */
struct crypto_ecb_ctx {
	struct crypto_cipher *child;
};
24 | 24 | ||
25 | static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key, | 25 | static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key, |
26 | unsigned int keylen) | 26 | unsigned int keylen) |
27 | { | 27 | { |
28 | struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent); | 28 | struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent); |
29 | struct crypto_cipher *child = ctx->child; | 29 | struct crypto_cipher *child = ctx->child; |
30 | int err; | 30 | int err; |
31 | 31 | ||
32 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 32 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
33 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 33 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & |
34 | CRYPTO_TFM_REQ_MASK); | 34 | CRYPTO_TFM_REQ_MASK); |
35 | err = crypto_cipher_setkey(child, key, keylen); | 35 | err = crypto_cipher_setkey(child, key, keylen); |
36 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 36 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & |
37 | CRYPTO_TFM_RES_MASK); | 37 | CRYPTO_TFM_RES_MASK); |
38 | return err; | 38 | return err; |
39 | } | 39 | } |
40 | 40 | ||
/*
 * Common ECB engine: walk the scatterlists and apply @fn (the child's
 * single-block encrypt or decrypt) to each full block independently.
 * Returns 0 or a negative errno from the blkcipher walk.
 */
static int crypto_ecb_crypt(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk,
			    struct crypto_cipher *tfm,
			    void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	/* Each walk step maps a contiguous virtual span of src/dst. */
	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		do {
			fn(crypto_cipher_tfm(tfm), wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
		} while ((nbytes -= bsize) >= bsize);

		/* Report leftover bytes; advances (or finishes) the walk. */
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}
68 | 68 | ||
69 | static int crypto_ecb_encrypt(struct blkcipher_desc *desc, | 69 | static int crypto_ecb_encrypt(struct blkcipher_desc *desc, |
70 | struct scatterlist *dst, struct scatterlist *src, | 70 | struct scatterlist *dst, struct scatterlist *src, |
71 | unsigned int nbytes) | 71 | unsigned int nbytes) |
72 | { | 72 | { |
73 | struct blkcipher_walk walk; | 73 | struct blkcipher_walk walk; |
74 | struct crypto_blkcipher *tfm = desc->tfm; | 74 | struct crypto_blkcipher *tfm = desc->tfm; |
75 | struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); | 75 | struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); |
76 | struct crypto_cipher *child = ctx->child; | 76 | struct crypto_cipher *child = ctx->child; |
77 | 77 | ||
78 | blkcipher_walk_init(&walk, dst, src, nbytes); | 78 | blkcipher_walk_init(&walk, dst, src, nbytes); |
79 | return crypto_ecb_crypt(desc, &walk, child, | 79 | return crypto_ecb_crypt(desc, &walk, child, |
80 | crypto_cipher_alg(child)->cia_encrypt); | 80 | crypto_cipher_alg(child)->cia_encrypt); |
81 | } | 81 | } |
82 | 82 | ||
83 | static int crypto_ecb_decrypt(struct blkcipher_desc *desc, | 83 | static int crypto_ecb_decrypt(struct blkcipher_desc *desc, |
84 | struct scatterlist *dst, struct scatterlist *src, | 84 | struct scatterlist *dst, struct scatterlist *src, |
85 | unsigned int nbytes) | 85 | unsigned int nbytes) |
86 | { | 86 | { |
87 | struct blkcipher_walk walk; | 87 | struct blkcipher_walk walk; |
88 | struct crypto_blkcipher *tfm = desc->tfm; | 88 | struct crypto_blkcipher *tfm = desc->tfm; |
89 | struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); | 89 | struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); |
90 | struct crypto_cipher *child = ctx->child; | 90 | struct crypto_cipher *child = ctx->child; |
91 | 91 | ||
92 | blkcipher_walk_init(&walk, dst, src, nbytes); | 92 | blkcipher_walk_init(&walk, dst, src, nbytes); |
93 | return crypto_ecb_crypt(desc, &walk, child, | 93 | return crypto_ecb_crypt(desc, &walk, child, |
94 | crypto_cipher_alg(child)->cia_decrypt); | 94 | crypto_cipher_alg(child)->cia_decrypt); |
95 | } | 95 | } |
96 | 96 | ||
97 | static int crypto_ecb_init_tfm(struct crypto_tfm *tfm) | 97 | static int crypto_ecb_init_tfm(struct crypto_tfm *tfm) |
98 | { | 98 | { |
99 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 99 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
100 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 100 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
101 | struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); | 101 | struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); |
102 | struct crypto_cipher *cipher; | 102 | struct crypto_cipher *cipher; |
103 | 103 | ||
104 | cipher = crypto_spawn_cipher(spawn); | 104 | cipher = crypto_spawn_cipher(spawn); |
105 | if (IS_ERR(cipher)) | 105 | if (IS_ERR(cipher)) |
106 | return PTR_ERR(cipher); | 106 | return PTR_ERR(cipher); |
107 | 107 | ||
108 | ctx->child = cipher; | 108 | ctx->child = cipher; |
109 | return 0; | 109 | return 0; |
110 | } | 110 | } |
111 | 111 | ||
112 | static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm) | 112 | static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm) |
113 | { | 113 | { |
114 | struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); | 114 | struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); |
115 | crypto_free_cipher(ctx->child); | 115 | crypto_free_cipher(ctx->child); |
116 | } | 116 | } |
117 | 117 | ||
118 | static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb) | 118 | static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb) |
119 | { | 119 | { |
120 | struct crypto_instance *inst; | 120 | struct crypto_instance *inst; |
121 | struct crypto_alg *alg; | 121 | struct crypto_alg *alg; |
122 | int err; | 122 | int err; |
123 | 123 | ||
124 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 124 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); |
125 | if (err) | 125 | if (err) |
126 | return ERR_PTR(err); | 126 | return ERR_PTR(err); |
127 | 127 | ||
128 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 128 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
129 | CRYPTO_ALG_TYPE_MASK); | 129 | CRYPTO_ALG_TYPE_MASK); |
130 | if (IS_ERR(alg)) | 130 | if (IS_ERR(alg)) |
131 | return ERR_CAST(alg); | 131 | return ERR_CAST(alg); |
132 | 132 | ||
133 | inst = crypto_alloc_instance("ecb", alg); | 133 | inst = crypto_alloc_instance("ecb", alg); |
134 | if (IS_ERR(inst)) | 134 | if (IS_ERR(inst)) |
135 | goto out_put_alg; | 135 | goto out_put_alg; |
136 | 136 | ||
137 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 137 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; |
138 | inst->alg.cra_priority = alg->cra_priority; | 138 | inst->alg.cra_priority = alg->cra_priority; |
139 | inst->alg.cra_blocksize = alg->cra_blocksize; | 139 | inst->alg.cra_blocksize = alg->cra_blocksize; |
140 | inst->alg.cra_alignmask = alg->cra_alignmask; | 140 | inst->alg.cra_alignmask = alg->cra_alignmask; |
141 | inst->alg.cra_type = &crypto_blkcipher_type; | 141 | inst->alg.cra_type = &crypto_blkcipher_type; |
142 | 142 | ||
143 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; | 143 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; |
144 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; | 144 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; |
145 | 145 | ||
146 | inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx); | 146 | inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx); |
147 | 147 | ||
148 | inst->alg.cra_init = crypto_ecb_init_tfm; | 148 | inst->alg.cra_init = crypto_ecb_init_tfm; |
149 | inst->alg.cra_exit = crypto_ecb_exit_tfm; | 149 | inst->alg.cra_exit = crypto_ecb_exit_tfm; |
150 | 150 | ||
151 | inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey; | 151 | inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey; |
152 | inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt; | 152 | inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt; |
153 | inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt; | 153 | inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt; |
154 | 154 | ||
155 | out_put_alg: | 155 | out_put_alg: |
156 | crypto_mod_put(alg); | 156 | crypto_mod_put(alg); |
157 | return inst; | 157 | return inst; |
158 | } | 158 | } |
159 | 159 | ||
/* Template ->free: drop the spawn reference and release the instance. */
static void crypto_ecb_free(struct crypto_instance *inst)
{
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);

	crypto_drop_spawn(spawn);
	kfree(inst);
}
165 | 165 | ||
/* Template descriptor registered with the crypto core under "ecb". */
static struct crypto_template crypto_ecb_tmpl = {
	.name = "ecb",
	.alloc = crypto_ecb_alloc,
	.free = crypto_ecb_free,
	.module = THIS_MODULE,
};
172 | 172 | ||
static int __init crypto_ecb_module_init(void)
{
	/* Register the "ecb" template; instances are created on demand. */
	return crypto_register_template(&crypto_ecb_tmpl);
}
177 | 177 | ||
static void __exit crypto_ecb_module_exit(void)
{
	/* Unregister the template on module unload. */
	crypto_unregister_template(&crypto_ecb_tmpl);
}
182 | 182 | ||
module_init(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECB block cipher algorithm");
/* "crypto-ecb" alias so template auto-loading uses the crypto- prefix. */
MODULE_ALIAS_CRYPTO("ecb");
188 | 189 |
crypto/eseqiv.c
1 | /* | 1 | /* |
2 | * eseqiv: Encrypted Sequence Number IV Generator | 2 | * eseqiv: Encrypted Sequence Number IV Generator |
3 | * | 3 | * |
4 | * This generator generates an IV based on a sequence number by xoring it | 4 | * This generator generates an IV based on a sequence number by xoring it |
5 | * with a salt and then encrypting it with the same key as used to encrypt | 5 | * with a salt and then encrypting it with the same key as used to encrypt |
6 | * the plain text. This algorithm requires that the block size be equal | 6 | * the plain text. This algorithm requires that the block size be equal |
7 | * to the IV size. It is mainly useful for CBC. | 7 | * to the IV size. It is mainly useful for CBC. |
8 | * | 8 | * |
9 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 9 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the Free | 12 | * under the terms of the GNU General Public License as published by the Free |
13 | * Software Foundation; either version 2 of the License, or (at your option) | 13 | * Software Foundation; either version 2 of the License, or (at your option) |
14 | * any later version. | 14 | * any later version. |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <crypto/internal/skcipher.h> | 18 | #include <crypto/internal/skcipher.h> |
19 | #include <crypto/rng.h> | 19 | #include <crypto/rng.h> |
20 | #include <crypto/scatterwalk.h> | 20 | #include <crypto/scatterwalk.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/string.h> | 28 | #include <linux/string.h> |
29 | 29 | ||
/* Per-request state: scatterlist headers used to chain an IV-sized
 * element in front of the caller's src/dst lists, followed by a
 * variable tail holding the aligned IV scratch and the sub-request. */
struct eseqiv_request_ctx {
	struct scatterlist src[2];
	struct scatterlist dst[2];
	char tail[];
};
35 | 35 | ||
/* Per-tfm state for the eseqiv generator. */
struct eseqiv_ctx {
	spinlock_t lock;	/* serialises first-use salt generation */
	unsigned int reqoff;	/* offset of the sub-request in reqctx->tail */
	char salt[];		/* ivsize bytes, filled from the RNG on first use */
};
41 | 41 | ||
42 | static void eseqiv_complete2(struct skcipher_givcrypt_request *req) | 42 | static void eseqiv_complete2(struct skcipher_givcrypt_request *req) |
43 | { | 43 | { |
44 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 44 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
45 | struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); | 45 | struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); |
46 | 46 | ||
47 | memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail, | 47 | memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail, |
48 | crypto_ablkcipher_alignmask(geniv) + 1), | 48 | crypto_ablkcipher_alignmask(geniv) + 1), |
49 | crypto_ablkcipher_ivsize(geniv)); | 49 | crypto_ablkcipher_ivsize(geniv)); |
50 | } | 50 | } |
51 | 51 | ||
52 | static void eseqiv_complete(struct crypto_async_request *base, int err) | 52 | static void eseqiv_complete(struct crypto_async_request *base, int err) |
53 | { | 53 | { |
54 | struct skcipher_givcrypt_request *req = base->data; | 54 | struct skcipher_givcrypt_request *req = base->data; |
55 | 55 | ||
56 | if (err) | 56 | if (err) |
57 | goto out; | 57 | goto out; |
58 | 58 | ||
59 | eseqiv_complete2(req); | 59 | eseqiv_complete2(req); |
60 | 60 | ||
61 | out: | 61 | out: |
62 | skcipher_givcrypt_complete(req, err); | 62 | skcipher_givcrypt_complete(req, err); |
63 | } | 63 | } |
64 | 64 | ||
/*
 * Generate and encrypt the IV for one request.
 *
 * The per-tfm salt is used as the encryption IV (copied into
 * req->creq.info) and the big-endian sequence number is written into
 * req->giv; an extra ivsize-byte scatterlist element is chained in
 * front of the caller's src/dst lists so the IV block is encrypted
 * together with the payload.  When the data buffers do not directly
 * follow req->giv, an aligned scratch area in the request context is
 * used instead and the result is copied back via eseqiv_complete2().
 */
static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
	struct ablkcipher_request *subreq;
	crypto_completion_t compl;
	void *data;
	struct scatterlist *osrc, *odst;
	struct scatterlist *dst;
	struct page *srcp;
	struct page *dstp;
	u8 *giv;
	u8 *vsrc;
	u8 *vdst;
	__be64 seq;
	unsigned int ivsize;
	unsigned int len;
	int err;

	/* The sub-request lives at ctx->reqoff inside the aligned tail. */
	subreq = (void *)(reqctx->tail + ctx->reqoff);
	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

	giv = req->giv;
	compl = req->creq.base.complete;
	data = req->creq.base.data;

	osrc = req->creq.src;
	odst = req->creq.dst;
	srcp = sg_page(osrc);
	dstp = sg_page(odst);
	/* NULL for highmem pages: those can never compare equal to
	 * giv + ivsize below, forcing the scratch path. */
	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;

	ivsize = crypto_ablkcipher_ivsize(geniv);

	/* Data does not adjoin req->giv: use the aligned scratch area
	 * and copy the encrypted IV back on completion. */
	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
		giv = PTR_ALIGN((u8 *)reqctx->tail,
				crypto_ablkcipher_alignmask(geniv) + 1);
		compl = eseqiv_complete;
		data = req;
	}

	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
					data);

	/* Chain the ivsize-byte IV element in front of the payload. */
	sg_init_table(reqctx->src, 2);
	sg_set_buf(reqctx->src, giv, ivsize);
	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);

	dst = reqctx->src;
	if (osrc != odst) {
		sg_init_table(reqctx->dst, 2);
		sg_set_buf(reqctx->dst, giv, ivsize);
		scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);

		dst = reqctx->dst;
	}

	ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
				     req->creq.nbytes + ivsize,
				     req->creq.info);

	/* The salt acts as the chaining IV for the encryption. */
	memcpy(req->creq.info, ctx->salt, ivsize);

	/* Write the big-endian sequence number into the low-order bytes
	 * of req->giv, zero-padding anything above 64 bits. */
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto out;

	/* Synchronous completion on the scratch path: copy the IV now. */
	if (giv != req->giv)
		eseqiv_complete2(req);

out:
	return err;
}
148 | 148 | ||
/*
 * First-use entry point: lazily generate the random salt, then switch
 * the tfm over to the eseqiv_givencrypt() fast path.  The givencrypt
 * pointer is re-checked under the lock so that concurrent first calls
 * initialise the salt exactly once.
 */
static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	/* Another caller already did the one-time setup. */
	if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return eseqiv_givencrypt(req);
}
171 | 171 | ||
/*
 * tfm init: size the per-request context.  The tail of the request
 * context holds [padding][IV scratch][sub-request]; ctx->reqoff
 * records where the sub-request starts within ->tail.
 */
static int eseqiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	unsigned long alignmask;
	unsigned int reqsize;

	spin_lock_init(&ctx->lock);

	alignmask = crypto_tfm_ctx_alignment() - 1;
	reqsize = sizeof(struct eseqiv_request_ctx);

	/* NOTE(review): this computes only the extra alignment needed
	 * beyond what the request-ctx offset already guarantees. */
	if (alignmask & reqsize) {
		alignmask &= reqsize;
		alignmask--;
	}

	alignmask = ~alignmask;
	alignmask &= crypto_ablkcipher_alignmask(geniv);

	/* Padding for the IV scratch area, then the IV itself, rounded
	 * up so the trailing sub-request is ctx-aligned. */
	reqsize += alignmask;
	reqsize += crypto_ablkcipher_ivsize(geniv);
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());

	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);

	tfm->crt_ablkcipher.reqsize = reqsize +
				      sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}
203 | 203 | ||
204 | static struct crypto_template eseqiv_tmpl; | 204 | static struct crypto_template eseqiv_tmpl; |
205 | 205 | ||
/*
 * Template ->alloc: build an eseqiv instance around the wrapped
 * skcipher.  Takes a reference on the default RNG (needed for salt
 * generation) and requires the IV size to equal the block size,
 * since the IV is encrypted as one extra block of the payload.
 */
static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	int err;

	err = crypto_get_default_rng();
	if (err)
		return ERR_PTR(err);

	inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto put_rng;

	err = -EINVAL;
	if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
		goto free_inst;

	inst->alg.cra_ablkcipher.givencrypt = eseqiv_givencrypt_first;

	inst->alg.cra_init = eseqiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	/* Extra room for the variable-length salt after the fixed ctx. */
	inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;

free_inst:
	skcipher_geniv_free(inst);
	inst = ERR_PTR(err);
put_rng:
	crypto_put_default_rng();
	goto out;
}
241 | 241 | ||
/* Template ->free: release the geniv instance, then drop the RNG
 * reference taken in eseqiv_alloc(). */
static void eseqiv_free(struct crypto_instance *inst)
{
	skcipher_geniv_free(inst);
	crypto_put_default_rng();
}
247 | 247 | ||
/* Template descriptor registered with the crypto core under "eseqiv". */
static struct crypto_template eseqiv_tmpl = {
	.name = "eseqiv",
	.alloc = eseqiv_alloc,
	.free = eseqiv_free,
	.module = THIS_MODULE,
};
254 | 254 | ||
static int __init eseqiv_module_init(void)
{
	/* Register the "eseqiv" template with the crypto core. */
	return crypto_register_template(&eseqiv_tmpl);
}
259 | 259 | ||
static void __exit eseqiv_module_exit(void)
{
	/* Unregister the template on module unload. */
	crypto_unregister_template(&eseqiv_tmpl);
}
264 | 264 | ||
module_init(eseqiv_module_init);
module_exit(eseqiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
/* "crypto-eseqiv" alias so template auto-loading uses the crypto- prefix. */
MODULE_ALIAS_CRYPTO("eseqiv");
270 | 271 |
crypto/gcm.c
1 | /* | 1 | /* |
2 | * GCM: Galois/Counter Mode. | 2 | * GCM: Galois/Counter Mode. |
3 | * | 3 | * |
4 | * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi> | 4 | * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation. | 8 | * by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <crypto/gf128mul.h> | 11 | #include <crypto/gf128mul.h> |
12 | #include <crypto/internal/aead.h> | 12 | #include <crypto/internal/aead.h> |
13 | #include <crypto/internal/skcipher.h> | 13 | #include <crypto/internal/skcipher.h> |
14 | #include <crypto/internal/hash.h> | 14 | #include <crypto/internal/hash.h> |
15 | #include <crypto/scatterwalk.h> | 15 | #include <crypto/scatterwalk.h> |
16 | #include <crypto/hash.h> | 16 | #include <crypto/hash.h> |
17 | #include "internal.h" | 17 | #include "internal.h" |
18 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | 24 | ||
/* Instance context for gcm(...): spawns for the CTR mode cipher and
 * the GHASH ahash the instance is assembled from. */
struct gcm_instance_ctx {
	struct crypto_skcipher_spawn ctr;
	struct crypto_ahash_spawn ghash;
};

/* Per-tfm context: the instantiated CTR cipher and GHASH transform. */
struct crypto_gcm_ctx {
	struct crypto_ablkcipher *ctr;
	struct crypto_ahash *ghash;
};

/* rfc4106(gcm(...)) per-tfm context: wrapped aead plus a 4-byte nonce
 * (presumably derived from the key — setkey is outside this excerpt). */
struct crypto_rfc4106_ctx {
	struct crypto_aead *child;
	u8 nonce[4];
};

/* rfc4543 instance context: spawns for the wrapped aead and the null
 * cipher used for copy-through. */
struct crypto_rfc4543_instance_ctx {
	struct crypto_aead_spawn aead;
	struct crypto_skcipher_spawn null;
};

/* rfc4543(gcm(...)) per-tfm context. */
struct crypto_rfc4543_ctx {
	struct crypto_aead *child;
	struct crypto_blkcipher *null;
	u8 nonce[4];
};

/* rfc4543 per-request scratch: tag and assoc staging buffers plus the
 * scatterlists and sub-request forwarded to the child aead. */
struct crypto_rfc4543_req_ctx {
	u8 auth_tag[16];
	u8 assocbuf[32];
	struct scatterlist cipher[1];
	struct scatterlist payload[2];
	struct scatterlist assoc[2];
	struct aead_request subreq;
};

/* State carried across the asynchronous GHASH steps of one request. */
struct crypto_gcm_ghash_ctx {
	unsigned int cryptlen;
	struct scatterlist *src;
	void (*complete)(struct aead_request *req, int err);
};

/* Per-request private context; always fetched through
 * crypto_gcm_reqctx(), which aligns it to the aead's alignmask. */
struct crypto_gcm_req_priv_ctx {
	u8 auth_tag[16];
	u8 iauth_tag[16];
	struct scatterlist src[2];
	struct scatterlist dst[2];
	struct crypto_gcm_ghash_ctx ghash_ctx;
	union {
		/* scratch for either the hash or the cipher step */
		struct ahash_request ahreq;
		struct ablkcipher_request abreq;
	} u;
};

/* Synchronisation for the async CTR call made during setkey. */
struct crypto_gcm_setkey_result {
	int err;
	struct completion completion;
};

/* All-zero buffer; allocated and freed outside this excerpt. */
static void *gcm_zeroes;
84 | 84 | ||
85 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( | 85 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( |
86 | struct aead_request *req) | 86 | struct aead_request *req) |
87 | { | 87 | { |
88 | unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); | 88 | unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); |
89 | 89 | ||
90 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); | 90 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); |
91 | } | 91 | } |
92 | 92 | ||
93 | static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) | 93 | static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) |
94 | { | 94 | { |
95 | struct crypto_gcm_setkey_result *result = req->data; | 95 | struct crypto_gcm_setkey_result *result = req->data; |
96 | 96 | ||
97 | if (err == -EINPROGRESS) | 97 | if (err == -EINPROGRESS) |
98 | return; | 98 | return; |
99 | 99 | ||
100 | result->err = err; | 100 | result->err = err; |
101 | complete(&result->completion); | 101 | complete(&result->completion); |
102 | } | 102 | } |
103 | 103 | ||
/*
 * GCM setkey: program the underlying CTR cipher with the user key,
 * then derive the GHASH subkey by encrypting a single all-zero block
 * (the buffer is kzalloc'd, so both the hash block and the IV start
 * zeroed) and program the result into the GHASH transform.  The CTR
 * request may complete asynchronously, so a completion embedded in
 * the heap buffer is used to wait for it.
 */
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ahash *ghash = ctx->ghash;
	struct crypto_ablkcipher *ctr = ctx->ctr;
	struct {
		be128 hash;
		u8 iv[8];

		struct crypto_gcm_setkey_result result;

		struct scatterlist sg[1];
		struct ablkcipher_request req;	/* variable-size tail below */
	} *data;
	int err;

	/* Propagate request flags (e.g. weak-key policy) down to CTR. */
	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				    CRYPTO_TFM_REQ_MASK);

	err = crypto_ablkcipher_setkey(ctr, key, keylen);
	if (err)
		return err;

	/* ...and reflect CTR's result flags back up to the aead. */
	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);

	/* Extra room for the CTR implementation's request context. */
	data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	init_completion(&data->result.completion);
	sg_init_one(data->sg, &data->hash, sizeof(data->hash));
	ablkcipher_request_set_tfm(&data->req, ctr);
	ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					crypto_gcm_setkey_done,
					&data->result);
	ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
				     sizeof(data->hash), data->iv);

	/* Encrypt the zero block in place; wait if it went async. */
	err = crypto_ablkcipher_encrypt(&data->req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		err = wait_for_completion_interruptible(
			&data->result.completion);
		if (!err)
			err = data->result.err;
	}

	if (err)
		goto out;

	/* Same flag propagation dance for the GHASH key. */
	crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
	crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
			      CRYPTO_TFM_RES_MASK);

out:
	kfree(data);
	return err;
}
169 | 169 | ||
/*
 * Validate the requested GCM authentication tag size.
 *
 * Accepted sizes are 4, 8 and 12..16 bytes; anything else is
 * rejected with -EINVAL.
 */
static int crypto_gcm_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	/* 12..16 is the contiguous range; 4 and 8 are the short tags. */
	if ((authsize >= 12 && authsize <= 16) ||
	    authsize == 4 || authsize == 8)
		return 0;

	return -EINVAL;
}
188 | 188 | ||
/*
 * Set up the CTR ablkcipher request used for the GCM payload.
 *
 * A zeroed 16-byte auth_tag buffer is chained in front of both the
 * source and destination scatterlists, so the same CTR pass also
 * encrypts that zero block; the result in pctx->auth_tag is later
 * XORed with the GHASH digest to form the authentication tag.
 */
static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
				  struct aead_request *req,
				  unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct scatterlist *dst;
	__be32 counter = cpu_to_be32(1);

	/*
	 * Zero the tag buffer and append a 32-bit big-endian counter
	 * initialised to 1 after the 12-byte IV (the CTR counter block).
	 */
	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
	memcpy(req->iv + 12, &counter, 4);

	/* Chain the tag buffer in front of the source payload. */
	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		/* Out-of-place: mirror the chaining for the destination. */
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	/* Total length includes the prepended 16-byte tag block. */
	ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
	ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
				     cryptlen + sizeof(pctx->auth_tag),
				     req->iv);
}
219 | 219 | ||
/*
 * Number of padding bytes needed to bring @len up to the next
 * 16-byte (GHASH block) boundary; 0 when already aligned.
 */
static inline unsigned int gcm_remain(unsigned int len)
{
	/* (16 - len) mod 16: 0 for aligned lengths, 16 - (len % 16) otherwise. */
	return (16U - len) & 0xfU;
}
225 | 225 | ||
226 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err); | 226 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err); |
227 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err); | 227 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err); |
228 | 228 | ||
/*
 * Feed @len bytes from @src into the GHASH ahash request.
 *
 * Completion is reported asynchronously through @compl with @req as
 * the callback context.  Returns 0 on synchronous completion or a
 * negative error (-EINPROGRESS/-EBUSY for async operation).
 */
static int gcm_hash_update(struct aead_request *req,
			   struct crypto_gcm_req_priv_ctx *pctx,
			   crypto_completion_t compl,
			   struct scatterlist *src,
			   unsigned int len)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;

	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   compl, req);
	ahash_request_set_crypt(ahreq, src, NULL, len);

	return crypto_ahash_update(ahreq);
}
243 | 243 | ||
/*
 * Hash @remain bytes from gcm_zeroes (a pad buffer defined elsewhere
 * in this file) to pad the previous input out to a full 16-byte GHASH
 * block.  Completion is reported through @compl with @req as context.
 */
static int gcm_hash_remain(struct aead_request *req,
			   struct crypto_gcm_req_priv_ctx *pctx,
			   unsigned int remain,
			   crypto_completion_t compl)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;

	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   compl, req);
	/* pctx->src is reused here as a one-entry scatterlist. */
	sg_init_one(pctx->src, gcm_zeroes, remain);
	ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);

	return crypto_ahash_update(ahreq);
}
258 | 258 | ||
/*
 * Hash the final GHASH block: the 64-bit big-endian bit lengths of
 * the associated data and of the ciphertext, packed into 16 bytes.
 * The block is staged in pctx->iauth_tag, which is free at this
 * point in the chain.
 */
static int gcm_hash_len(struct aead_request *req,
			struct crypto_gcm_req_priv_ctx *pctx)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
	u128 lengths;

	/* Lengths are expressed in bits, hence the * 8. */
	lengths.a = cpu_to_be64(req->assoclen * 8);
	lengths.b = cpu_to_be64(gctx->cryptlen * 8);
	memcpy(pctx->iauth_tag, &lengths, 16);
	sg_init_one(pctx->src, pctx->iauth_tag, 16);
	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   gcm_hash_len_done, req);
	ahash_request_set_crypt(ahreq, pctx->src,
				NULL, sizeof(lengths));

	return crypto_ahash_update(ahreq);
}
277 | 277 | ||
/*
 * Finalize GHASH: the digest is written into pctx->iauth_tag and
 * completion is signalled through gcm_hash_final_done.
 */
static int gcm_hash_final(struct aead_request *req,
			  struct crypto_gcm_req_priv_ctx *pctx)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;

	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   gcm_hash_final_done, req);
	ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0);

	return crypto_ahash_final(ahreq);
}
289 | 289 | ||
/*
 * Last step of the GHASH chain: XOR the digest (iauth_tag) into the
 * CTR-encrypted zero block (auth_tag) to form the authentication tag,
 * then hand control to the per-direction completion in gctx->complete
 * (gcm_enc_hash_done or gcm_dec_hash_done).
 */
static void __gcm_hash_final_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;

	if (!err)
		crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);

	gctx->complete(req, err);
}
300 | 300 | ||
301 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err) | 301 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err) |
302 | { | 302 | { |
303 | struct aead_request *req = areq->data; | 303 | struct aead_request *req = areq->data; |
304 | 304 | ||
305 | __gcm_hash_final_done(req, err); | 305 | __gcm_hash_final_done(req, err); |
306 | } | 306 | } |
307 | 307 | ||
/*
 * Length block hashed; kick off GHASH finalization.  -EINPROGRESS and
 * -EBUSY mean the step continues asynchronously and its callback will
 * resume the chain, so we must not fall through to completion here.
 */
static void __gcm_hash_len_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);

	if (!err) {
		err = gcm_hash_final(req, pctx);
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
	}

	/* Synchronous completion (or error): continue the chain inline. */
	__gcm_hash_final_done(req, err);
}
320 | 320 | ||
321 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err) | 321 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err) |
322 | { | 322 | { |
323 | struct aead_request *req = areq->data; | 323 | struct aead_request *req = areq->data; |
324 | 324 | ||
325 | __gcm_hash_len_done(req, err); | 325 | __gcm_hash_len_done(req, err); |
326 | } | 326 | } |
327 | 327 | ||
/*
 * Ciphertext (including any zero padding) fully hashed; hash the
 * lengths block next.  -EINPROGRESS/-EBUSY mean the step continues
 * asynchronously via its callback, so do not complete here.
 */
static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);

	if (!err) {
		err = gcm_hash_len(req, pctx);
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
	}

	/* Synchronous completion (or error): continue the chain inline. */
	__gcm_hash_len_done(req, err);
}
340 | 340 | ||
341 | static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, | 341 | static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, |
342 | int err) | 342 | int err) |
343 | { | 343 | { |
344 | struct aead_request *req = areq->data; | 344 | struct aead_request *req = areq->data; |
345 | 345 | ||
346 | __gcm_hash_crypt_remain_done(req, err); | 346 | __gcm_hash_crypt_remain_done(req, err); |
347 | } | 347 | } |
348 | 348 | ||
/*
 * Full ciphertext blocks hashed; hash the zero padding for the final
 * partial block.  This callback is only installed when the ciphertext
 * length is not 16-byte aligned, hence the BUG_ON.
 */
static void __gcm_hash_crypt_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
	unsigned int remain;

	if (!err) {
		remain = gcm_remain(gctx->cryptlen);
		BUG_ON(!remain);
		err = gcm_hash_remain(req, pctx, remain,
				      gcm_hash_crypt_remain_done);
		/* Async: the callback resumes the chain. */
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
	}

	__gcm_hash_crypt_remain_done(req, err);
}
366 | 366 | ||
367 | static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err) | 367 | static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err) |
368 | { | 368 | { |
369 | struct aead_request *req = areq->data; | 369 | struct aead_request *req = areq->data; |
370 | 370 | ||
371 | __gcm_hash_crypt_done(req, err); | 371 | __gcm_hash_crypt_done(req, err); |
372 | } | 372 | } |
373 | 373 | ||
/*
 * Associated data (including padding) fully hashed; feed the
 * ciphertext to GHASH, or fall through to the lengths block when
 * there is no ciphertext to hash.
 */
static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
	crypto_completion_t compl;
	unsigned int remain = 0;

	if (!err && gctx->cryptlen) {
		remain = gcm_remain(gctx->cryptlen);
		/* Partial tail block -> pad first; else go to lengths. */
		compl = remain ? gcm_hash_crypt_done :
			gcm_hash_crypt_remain_done;
		err = gcm_hash_update(req, pctx, compl,
				      gctx->src, gctx->cryptlen);
		/* Async: the chosen callback resumes the chain. */
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
	}

	/* Synchronous path: continue the chain inline. */
	if (remain)
		__gcm_hash_crypt_done(req, err);
	else
		__gcm_hash_crypt_remain_done(req, err);
}
396 | 396 | ||
397 | static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, | 397 | static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, |
398 | int err) | 398 | int err) |
399 | { | 399 | { |
400 | struct aead_request *req = areq->data; | 400 | struct aead_request *req = areq->data; |
401 | 401 | ||
402 | __gcm_hash_assoc_remain_done(req, err); | 402 | __gcm_hash_assoc_remain_done(req, err); |
403 | } | 403 | } |
404 | 404 | ||
/*
 * Associated data hashed; hash the zero padding for its final partial
 * block.  This callback is only installed when assoclen is not
 * 16-byte aligned, hence the BUG_ON.
 */
static void __gcm_hash_assoc_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	unsigned int remain;

	if (!err) {
		remain = gcm_remain(req->assoclen);
		BUG_ON(!remain);
		err = gcm_hash_remain(req, pctx, remain,
				      gcm_hash_assoc_remain_done);
		/* Async: the callback resumes the chain. */
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
	}

	__gcm_hash_assoc_remain_done(req, err);
}
421 | 421 | ||
422 | static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err) | 422 | static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err) |
423 | { | 423 | { |
424 | struct aead_request *req = areq->data; | 424 | struct aead_request *req = areq->data; |
425 | 425 | ||
426 | __gcm_hash_assoc_done(req, err); | 426 | __gcm_hash_assoc_done(req, err); |
427 | } | 427 | } |
428 | 428 | ||
/*
 * GHASH init complete; start hashing the associated data, or skip
 * straight to the ciphertext stage when there is none.
 */
static void __gcm_hash_init_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	crypto_completion_t compl;
	unsigned int remain = 0;

	if (!err && req->assoclen) {
		remain = gcm_remain(req->assoclen);
		/* Partial tail block -> pad first; else go to ciphertext. */
		compl = remain ? gcm_hash_assoc_done :
			gcm_hash_assoc_remain_done;
		err = gcm_hash_update(req, pctx, compl,
				      req->assoc, req->assoclen);
		/* Async: the chosen callback resumes the chain. */
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
	}

	/* Synchronous path: continue the chain inline. */
	if (remain)
		__gcm_hash_assoc_done(req, err);
	else
		__gcm_hash_assoc_remain_done(req, err);
}
450 | 450 | ||
451 | static void gcm_hash_init_done(struct crypto_async_request *areq, int err) | 451 | static void gcm_hash_init_done(struct crypto_async_request *areq, int err) |
452 | { | 452 | { |
453 | struct aead_request *req = areq->data; | 453 | struct aead_request *req = areq->data; |
454 | 454 | ||
455 | __gcm_hash_init_done(req, err); | 455 | __gcm_hash_init_done(req, err); |
456 | } | 456 | } |
457 | 457 | ||
/*
 * Drive the whole GHASH computation: init, associated data (+pad),
 * ciphertext (+pad), lengths block, final.
 *
 * Each step either completes synchronously (returns 0) and we proceed
 * to the next one inline, or returns an error which is propagated;
 * for -EINPROGRESS/-EBUSY the completion callbacks installed on each
 * request (gcm_hash_*_done) take over and drive the remaining steps.
 */
static int gcm_hash(struct aead_request *req,
		    struct crypto_gcm_req_priv_ctx *pctx)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
	struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	unsigned int remain;
	crypto_completion_t compl;
	int err;

	ahash_request_set_tfm(ahreq, ctx->ghash);

	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   gcm_hash_init_done, req);
	err = crypto_ahash_init(ahreq);
	if (err)
		return err;
	/* Associated data, padded to a 16-byte boundary if needed. */
	remain = gcm_remain(req->assoclen);
	compl = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
	err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen);
	if (err)
		return err;
	if (remain) {
		err = gcm_hash_remain(req, pctx, remain,
				      gcm_hash_assoc_remain_done);
		if (err)
			return err;
	}
	/* Ciphertext, padded to a 16-byte boundary if needed. */
	remain = gcm_remain(gctx->cryptlen);
	compl = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
	err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen);
	if (err)
		return err;
	if (remain) {
		err = gcm_hash_remain(req, pctx, remain,
				      gcm_hash_crypt_remain_done);
		if (err)
			return err;
	}
	/* Bit-length block, then finalize into pctx->iauth_tag. */
	err = gcm_hash_len(req, pctx);
	if (err)
		return err;
	err = gcm_hash_final(req, pctx);
	if (err)
		return err;

	return 0;
}
506 | 506 | ||
/*
 * Append the authentication tag: copy authsize bytes of the computed
 * auth_tag into the destination scatterlist right after the
 * ciphertext (at offset req->cryptlen).
 */
static void gcm_enc_copy_hash(struct aead_request *req,
			      struct crypto_gcm_req_priv_ctx *pctx)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	u8 *auth_tag = pctx->auth_tag;

	/* Final argument 1 = copy from buffer into the scatterlist. */
	scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
				 crypto_aead_authsize(aead), 1);
}
516 | 516 | ||
/*
 * Encrypt-side gctx->complete handler: the GHASH chain has produced
 * the tag in pctx->auth_tag; append it to the output and finish the
 * AEAD request.
 */
static void gcm_enc_hash_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);

	if (!err)
		gcm_enc_copy_hash(req, pctx);

	aead_request_complete(req, err);
}
526 | 526 | ||
/*
 * Async completion of the CTR encryption: run the GHASH chain over
 * the ciphertext.  If the whole chain completes synchronously, fold
 * the digest into the tag and append it here; if it went async, the
 * chain's callbacks end in gcm_enc_hash_done instead.
 */
static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);

	if (!err) {
		err = gcm_hash(req, pctx);
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
		else if (!err) {
			crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
			gcm_enc_copy_hash(req, pctx);
		}
	}

	aead_request_complete(req, err);
}
544 | 544 | ||
/*
 * GCM encryption entry point: CTR-encrypt the plaintext (plus the
 * zero tag block chained in by crypto_gcm_init_crypt), then GHASH the
 * resulting ciphertext and append the authentication tag.
 *
 * Returns 0 on synchronous success, -EINPROGRESS/-EBUSY when a stage
 * went asynchronous (completion continues via the callbacks), or a
 * negative error.
 */
static int crypto_gcm_encrypt(struct aead_request *req)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->u.abreq;
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
	int err;

	crypto_gcm_init_crypt(abreq, req, req->cryptlen);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					gcm_encrypt_done, req);

	/* GHASH runs over the freshly produced ciphertext in req->dst. */
	gctx->src = req->dst;
	gctx->cryptlen = req->cryptlen;
	gctx->complete = gcm_enc_hash_done;

	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	err = gcm_hash(req, pctx);
	if (err)
		return err;

	/* Fully synchronous path: build and append the tag inline. */
	crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
	gcm_enc_copy_hash(req, pctx);

	return 0;
}
573 | 573 | ||
/*
 * Compare the computed tag (auth_tag XOR GHASH digest) against the
 * tag stored at the end of the source ciphertext, using
 * crypto_memneq() to avoid data-dependent timing.
 *
 * Returns 0 on match, -EBADMSG on authentication failure.
 */
static int crypto_gcm_verify(struct aead_request *req,
			     struct crypto_gcm_req_priv_ctx *pctx)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	u8 *auth_tag = pctx->auth_tag;
	u8 *iauth_tag = pctx->iauth_tag;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen - authsize;

	crypto_xor(auth_tag, iauth_tag, 16);
	/* Pull the transmitted tag out of the source scatterlist. */
	scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
	return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
587 | 587 | ||
/*
 * Async completion of the CTR decryption: verify the authentication
 * tag and finish the AEAD request.
 */
static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);

	if (!err)
		err = crypto_gcm_verify(req, pctx);

	aead_request_complete(req, err);
}
598 | 598 | ||
/*
 * Decrypt-side gctx->complete handler: GHASH over the ciphertext is
 * done, so now run the CTR decryption and verify the tag.  If the
 * decryption goes async, gcm_decrypt_done finishes the request.
 */
static void gcm_dec_hash_done(struct aead_request *req, int err)
{
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->u.abreq;
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;

	if (!err) {
		ablkcipher_request_set_callback(abreq, aead_request_flags(req),
						gcm_decrypt_done, req);
		crypto_gcm_init_crypt(abreq, req, gctx->cryptlen);
		err = crypto_ablkcipher_decrypt(abreq);
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
		else if (!err)
			err = crypto_gcm_verify(req, pctx);
	}

	aead_request_complete(req, err);
}
618 | 618 | ||
/*
 * GCM decryption entry point: GHASH the ciphertext first (decrypt
 * order is hash-then-decrypt), then CTR-decrypt and verify the tag.
 *
 * req->cryptlen includes the trailing tag; it must be at least
 * authsize bytes.  Returns 0 on success, -EBADMSG on tag mismatch,
 * -EINPROGRESS/-EBUSY when a stage went asynchronous, or another
 * negative error.
 */
static int crypto_gcm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->u.abreq;
	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen;
	int err;

	if (cryptlen < authsize)
		return -EINVAL;
	/* Strip the tag; only the payload is decrypted/hashed. */
	cryptlen -= authsize;

	gctx->src = req->src;
	gctx->cryptlen = cryptlen;
	gctx->complete = gcm_dec_hash_done;

	err = gcm_hash(req, pctx);
	if (err)
		return err;

	/* Fully synchronous path: decrypt and verify inline. */
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					gcm_decrypt_done, req);
	crypto_gcm_init_crypt(abreq, req, cryptlen);
	err = crypto_ablkcipher_decrypt(abreq);
	if (err)
		return err;

	return crypto_gcm_verify(req, pctx);
}
650 | 650 | ||
/*
 * Instantiate the per-tfm state: grab the GHASH ahash and CTR
 * skcipher from the instance's spawns and size the request context
 * so it can hold whichever of the two child requests is larger
 * (they share the pctx->u union).
 *
 * Returns 0 on success or a negative error; on failure no child
 * tfm is leaked (ghash is freed if the ctr spawn fails).
 */
static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ctr;
	struct crypto_ahash *ghash;
	unsigned long align;
	int err;

	ghash = crypto_spawn_ahash(&ictx->ghash);
	if (IS_ERR(ghash))
		return PTR_ERR(ghash);

	ctr = crypto_spawn_skcipher(&ictx->ctr);
	err = PTR_ERR(ctr);
	if (IS_ERR(ctr))
		goto err_free_hash;

	ctx->ctr = ctr;
	ctx->ghash = ghash;

	/* Align the request context, then add the larger child request. */
	align = crypto_tfm_alg_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = align +
		offsetof(struct crypto_gcm_req_priv_ctx, u) +
		max(sizeof(struct ablkcipher_request) +
		    crypto_ablkcipher_reqsize(ctr),
		    sizeof(struct ahash_request) +
		    crypto_ahash_reqsize(ghash));

	return 0;

err_free_hash:
	crypto_free_ahash(ghash);
	return err;
}
688 | 688 | ||
689 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) | 689 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) |
690 | { | 690 | { |
691 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 691 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); |
692 | 692 | ||
693 | crypto_free_ahash(ctx->ghash); | 693 | crypto_free_ahash(ctx->ghash); |
694 | crypto_free_ablkcipher(ctx->ctr); | 694 | crypto_free_ablkcipher(ctx->ctr); |
695 | } | 695 | } |
696 | 696 | ||
697 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | 697 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, |
698 | const char *full_name, | 698 | const char *full_name, |
699 | const char *ctr_name, | 699 | const char *ctr_name, |
700 | const char *ghash_name) | 700 | const char *ghash_name) |
701 | { | 701 | { |
702 | struct crypto_attr_type *algt; | 702 | struct crypto_attr_type *algt; |
703 | struct crypto_instance *inst; | 703 | struct crypto_instance *inst; |
704 | struct crypto_alg *ctr; | 704 | struct crypto_alg *ctr; |
705 | struct crypto_alg *ghash_alg; | 705 | struct crypto_alg *ghash_alg; |
706 | struct ahash_alg *ghash_ahash_alg; | 706 | struct ahash_alg *ghash_ahash_alg; |
707 | struct gcm_instance_ctx *ctx; | 707 | struct gcm_instance_ctx *ctx; |
708 | int err; | 708 | int err; |
709 | 709 | ||
710 | algt = crypto_get_attr_type(tb); | 710 | algt = crypto_get_attr_type(tb); |
711 | if (IS_ERR(algt)) | 711 | if (IS_ERR(algt)) |
712 | return ERR_CAST(algt); | 712 | return ERR_CAST(algt); |
713 | 713 | ||
714 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 714 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
715 | return ERR_PTR(-EINVAL); | 715 | return ERR_PTR(-EINVAL); |
716 | 716 | ||
717 | ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, | 717 | ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, |
718 | CRYPTO_ALG_TYPE_HASH, | 718 | CRYPTO_ALG_TYPE_HASH, |
719 | CRYPTO_ALG_TYPE_AHASH_MASK); | 719 | CRYPTO_ALG_TYPE_AHASH_MASK); |
720 | if (IS_ERR(ghash_alg)) | 720 | if (IS_ERR(ghash_alg)) |
721 | return ERR_CAST(ghash_alg); | 721 | return ERR_CAST(ghash_alg); |
722 | 722 | ||
723 | err = -ENOMEM; | 723 | err = -ENOMEM; |
724 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 724 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
725 | if (!inst) | 725 | if (!inst) |
726 | goto out_put_ghash; | 726 | goto out_put_ghash; |
727 | 727 | ||
728 | ctx = crypto_instance_ctx(inst); | 728 | ctx = crypto_instance_ctx(inst); |
729 | ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base); | 729 | ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base); |
730 | err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg, | 730 | err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg, |
731 | inst); | 731 | inst); |
732 | if (err) | 732 | if (err) |
733 | goto err_free_inst; | 733 | goto err_free_inst; |
734 | 734 | ||
735 | crypto_set_skcipher_spawn(&ctx->ctr, inst); | 735 | crypto_set_skcipher_spawn(&ctx->ctr, inst); |
736 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, | 736 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, |
737 | crypto_requires_sync(algt->type, | 737 | crypto_requires_sync(algt->type, |
738 | algt->mask)); | 738 | algt->mask)); |
739 | if (err) | 739 | if (err) |
740 | goto err_drop_ghash; | 740 | goto err_drop_ghash; |
741 | 741 | ||
742 | ctr = crypto_skcipher_spawn_alg(&ctx->ctr); | 742 | ctr = crypto_skcipher_spawn_alg(&ctx->ctr); |
743 | 743 | ||
744 | /* We only support 16-byte blocks. */ | 744 | /* We only support 16-byte blocks. */ |
745 | if (ctr->cra_ablkcipher.ivsize != 16) | 745 | if (ctr->cra_ablkcipher.ivsize != 16) |
746 | goto out_put_ctr; | 746 | goto out_put_ctr; |
747 | 747 | ||
748 | /* Not a stream cipher? */ | 748 | /* Not a stream cipher? */ |
749 | err = -EINVAL; | 749 | err = -EINVAL; |
750 | if (ctr->cra_blocksize != 1) | 750 | if (ctr->cra_blocksize != 1) |
751 | goto out_put_ctr; | 751 | goto out_put_ctr; |
752 | 752 | ||
753 | err = -ENAMETOOLONG; | 753 | err = -ENAMETOOLONG; |
754 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 754 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
755 | "gcm_base(%s,%s)", ctr->cra_driver_name, | 755 | "gcm_base(%s,%s)", ctr->cra_driver_name, |
756 | ghash_alg->cra_driver_name) >= | 756 | ghash_alg->cra_driver_name) >= |
757 | CRYPTO_MAX_ALG_NAME) | 757 | CRYPTO_MAX_ALG_NAME) |
758 | goto out_put_ctr; | 758 | goto out_put_ctr; |
759 | 759 | ||
760 | memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); | 760 | memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); |
761 | 761 | ||
762 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 762 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; |
763 | inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; | 763 | inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; |
764 | inst->alg.cra_priority = ctr->cra_priority; | 764 | inst->alg.cra_priority = ctr->cra_priority; |
765 | inst->alg.cra_blocksize = 1; | 765 | inst->alg.cra_blocksize = 1; |
766 | inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1); | 766 | inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1); |
767 | inst->alg.cra_type = &crypto_aead_type; | 767 | inst->alg.cra_type = &crypto_aead_type; |
768 | inst->alg.cra_aead.ivsize = 16; | 768 | inst->alg.cra_aead.ivsize = 16; |
769 | inst->alg.cra_aead.maxauthsize = 16; | 769 | inst->alg.cra_aead.maxauthsize = 16; |
770 | inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx); | 770 | inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx); |
771 | inst->alg.cra_init = crypto_gcm_init_tfm; | 771 | inst->alg.cra_init = crypto_gcm_init_tfm; |
772 | inst->alg.cra_exit = crypto_gcm_exit_tfm; | 772 | inst->alg.cra_exit = crypto_gcm_exit_tfm; |
773 | inst->alg.cra_aead.setkey = crypto_gcm_setkey; | 773 | inst->alg.cra_aead.setkey = crypto_gcm_setkey; |
774 | inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize; | 774 | inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize; |
775 | inst->alg.cra_aead.encrypt = crypto_gcm_encrypt; | 775 | inst->alg.cra_aead.encrypt = crypto_gcm_encrypt; |
776 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; | 776 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; |
777 | 777 | ||
778 | out: | 778 | out: |
779 | crypto_mod_put(ghash_alg); | 779 | crypto_mod_put(ghash_alg); |
780 | return inst; | 780 | return inst; |
781 | 781 | ||
782 | out_put_ctr: | 782 | out_put_ctr: |
783 | crypto_drop_skcipher(&ctx->ctr); | 783 | crypto_drop_skcipher(&ctx->ctr); |
784 | err_drop_ghash: | 784 | err_drop_ghash: |
785 | crypto_drop_ahash(&ctx->ghash); | 785 | crypto_drop_ahash(&ctx->ghash); |
786 | err_free_inst: | 786 | err_free_inst: |
787 | kfree(inst); | 787 | kfree(inst); |
788 | out_put_ghash: | 788 | out_put_ghash: |
789 | inst = ERR_PTR(err); | 789 | inst = ERR_PTR(err); |
790 | goto out; | 790 | goto out; |
791 | } | 791 | } |
792 | 792 | ||
793 | static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) | 793 | static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) |
794 | { | 794 | { |
795 | const char *cipher_name; | 795 | const char *cipher_name; |
796 | char ctr_name[CRYPTO_MAX_ALG_NAME]; | 796 | char ctr_name[CRYPTO_MAX_ALG_NAME]; |
797 | char full_name[CRYPTO_MAX_ALG_NAME]; | 797 | char full_name[CRYPTO_MAX_ALG_NAME]; |
798 | 798 | ||
799 | cipher_name = crypto_attr_alg_name(tb[1]); | 799 | cipher_name = crypto_attr_alg_name(tb[1]); |
800 | if (IS_ERR(cipher_name)) | 800 | if (IS_ERR(cipher_name)) |
801 | return ERR_CAST(cipher_name); | 801 | return ERR_CAST(cipher_name); |
802 | 802 | ||
803 | if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= | 803 | if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= |
804 | CRYPTO_MAX_ALG_NAME) | 804 | CRYPTO_MAX_ALG_NAME) |
805 | return ERR_PTR(-ENAMETOOLONG); | 805 | return ERR_PTR(-ENAMETOOLONG); |
806 | 806 | ||
807 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >= | 807 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >= |
808 | CRYPTO_MAX_ALG_NAME) | 808 | CRYPTO_MAX_ALG_NAME) |
809 | return ERR_PTR(-ENAMETOOLONG); | 809 | return ERR_PTR(-ENAMETOOLONG); |
810 | 810 | ||
811 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash"); | 811 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash"); |
812 | } | 812 | } |
813 | 813 | ||
814 | static void crypto_gcm_free(struct crypto_instance *inst) | 814 | static void crypto_gcm_free(struct crypto_instance *inst) |
815 | { | 815 | { |
816 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); | 816 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); |
817 | 817 | ||
818 | crypto_drop_skcipher(&ctx->ctr); | 818 | crypto_drop_skcipher(&ctx->ctr); |
819 | crypto_drop_ahash(&ctx->ghash); | 819 | crypto_drop_ahash(&ctx->ghash); |
820 | kfree(inst); | 820 | kfree(inst); |
821 | } | 821 | } |
822 | 822 | ||
/* Template for "gcm(cipher)" instances. */
static struct crypto_template crypto_gcm_tmpl = {
	.name = "gcm",
	.alloc = crypto_gcm_alloc,
	.free = crypto_gcm_free,
	.module = THIS_MODULE,
};
829 | 829 | ||
830 | static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | 830 | static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) |
831 | { | 831 | { |
832 | const char *ctr_name; | 832 | const char *ctr_name; |
833 | const char *ghash_name; | 833 | const char *ghash_name; |
834 | char full_name[CRYPTO_MAX_ALG_NAME]; | 834 | char full_name[CRYPTO_MAX_ALG_NAME]; |
835 | 835 | ||
836 | ctr_name = crypto_attr_alg_name(tb[1]); | 836 | ctr_name = crypto_attr_alg_name(tb[1]); |
837 | if (IS_ERR(ctr_name)) | 837 | if (IS_ERR(ctr_name)) |
838 | return ERR_CAST(ctr_name); | 838 | return ERR_CAST(ctr_name); |
839 | 839 | ||
840 | ghash_name = crypto_attr_alg_name(tb[2]); | 840 | ghash_name = crypto_attr_alg_name(tb[2]); |
841 | if (IS_ERR(ghash_name)) | 841 | if (IS_ERR(ghash_name)) |
842 | return ERR_CAST(ghash_name); | 842 | return ERR_CAST(ghash_name); |
843 | 843 | ||
844 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", | 844 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", |
845 | ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) | 845 | ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) |
846 | return ERR_PTR(-ENAMETOOLONG); | 846 | return ERR_PTR(-ENAMETOOLONG); |
847 | 847 | ||
848 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name); | 848 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name); |
849 | } | 849 | } |
850 | 850 | ||
/* Template for "gcm_base(ctr,ghash)" instances. */
static struct crypto_template crypto_gcm_base_tmpl = {
	.name = "gcm_base",
	.alloc = crypto_gcm_base_alloc,
	.free = crypto_gcm_free,
	.module = THIS_MODULE,
};
857 | 857 | ||
/*
 * rfc4106 key layout: the trailing 4 bytes of the key material are the
 * implicit nonce (salt); the remainder is handed to the inner GCM tfm.
 * Returns 0 or the inner setkey's error code.
 */
static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;
	int err;

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	memcpy(ctx->nonce, key + keylen, 4);

	/* Forward the request flags down, then the result flags back up. */
	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
				     CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(child, key, keylen);
	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
				      CRYPTO_TFM_RES_MASK);

	return err;
}
880 | 880 | ||
881 | static int crypto_rfc4106_setauthsize(struct crypto_aead *parent, | 881 | static int crypto_rfc4106_setauthsize(struct crypto_aead *parent, |
882 | unsigned int authsize) | 882 | unsigned int authsize) |
883 | { | 883 | { |
884 | struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); | 884 | struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); |
885 | 885 | ||
886 | switch (authsize) { | 886 | switch (authsize) { |
887 | case 8: | 887 | case 8: |
888 | case 12: | 888 | case 12: |
889 | case 16: | 889 | case 16: |
890 | break; | 890 | break; |
891 | default: | 891 | default: |
892 | return -EINVAL; | 892 | return -EINVAL; |
893 | } | 893 | } |
894 | 894 | ||
895 | return crypto_aead_setauthsize(ctx->child, authsize); | 895 | return crypto_aead_setauthsize(ctx->child, authsize); |
896 | } | 896 | } |
897 | 897 | ||
/*
 * Build the inner GCM sub-request for an rfc4106 operation: assemble the
 * full 12-byte IV (4-byte stored nonce + 8-byte explicit IV from the
 * request) and mirror the outer request's parameters onto the sub-request.
 * Returns the sub-request, ready to be passed to encrypt/decrypt.
 */
static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	/* IV buffer lives after the child's request ctx, aligned for it. */
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
	aead_request_set_assoc(subreq, req->assoc, req->assoclen);

	return subreq;
}
918 | 918 | ||
/* Encrypt by delegating to the inner GCM tfm with the assembled IV. */
static int crypto_rfc4106_encrypt(struct aead_request *req)
{
	return crypto_aead_encrypt(crypto_rfc4106_crypt(req));
}
925 | 925 | ||
/* Decrypt by delegating to the inner GCM tfm with the assembled IV. */
static int crypto_rfc4106_decrypt(struct aead_request *req)
{
	return crypto_aead_decrypt(crypto_rfc4106_crypt(req));
}
932 | 932 | ||
/*
 * Instantiate an rfc4106 tfm: take a reference on the wrapped GCM aead
 * and size the request context to hold the sub-request, the child's
 * request context, alignment padding, and the 16-byte IV buffer used by
 * crypto_rfc4106_crypt().
 */
static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *aead;
	unsigned long align;

	aead = crypto_spawn_aead(spawn);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	ctx->child = aead;

	align = crypto_aead_alignmask(aead);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	/* subreq + child reqsize (ctx-aligned) + align slack + IV buffer */
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				ALIGN(crypto_aead_reqsize(aead),
				      crypto_tfm_ctx_alignment()) +
				align + 16;

	return 0;
}
956 | 956 | ||
957 | static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm) | 957 | static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm) |
958 | { | 958 | { |
959 | struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm); | 959 | struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm); |
960 | 960 | ||
961 | crypto_free_aead(ctx->child); | 961 | crypto_free_aead(ctx->child); |
962 | } | 962 | } |
963 | 963 | ||
/*
 * Instance constructor for the "rfc4106" template.  Wraps an existing
 * GCM aead, shrinking its IV to the 8-byte explicit part carried on the
 * wire (the 4-byte salt comes from the key via crypto_rfc4106_setkey()).
 * Returns the new instance or an ERR_PTR() on failure.
 */
static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct crypto_alg *alg;
	const char *ccm_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
		return ERR_PTR(-EINVAL);

	ccm_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(ccm_name))
		return ERR_CAST(ccm_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);
	crypto_set_aead_spawn(spawn, inst);
	err = crypto_grab_aead(spawn, ccm_name, 0,
			       crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	alg = crypto_aead_spawn_alg(spawn);

	err = -EINVAL;

	/* We only support 16-byte blocks. */
	if (alg->cra_aead.ivsize != 16)
		goto out_drop_alg;

	/* Not a stream cipher? */
	if (alg->cra_blocksize != 1)
		goto out_drop_alg;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4106(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4106(%s)", alg->cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_drop_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_nivaead_type;

	/* Only the 8-byte explicit IV is exposed to users. */
	inst->alg.cra_aead.ivsize = 8;
	inst->alg.cra_aead.maxauthsize = 16;

	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);

	inst->alg.cra_init = crypto_rfc4106_init_tfm;
	inst->alg.cra_exit = crypto_rfc4106_exit_tfm;

	inst->alg.cra_aead.setkey = crypto_rfc4106_setkey;
	inst->alg.cra_aead.setauthsize = crypto_rfc4106_setauthsize;
	inst->alg.cra_aead.encrypt = crypto_rfc4106_encrypt;
	inst->alg.cra_aead.decrypt = crypto_rfc4106_decrypt;

	/* Default IV generator for transforms that need one. */
	inst->alg.cra_aead.geniv = "seqiv";

out:
	return inst;

out_drop_alg:
	crypto_drop_aead(spawn);
out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
1047 | 1047 | ||
/* Tear down an rfc4106 instance: drop the aead spawn, free the memory. */
static void crypto_rfc4106_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
1053 | 1053 | ||
/* Template for "rfc4106(gcm(...))" instances. */
static struct crypto_template crypto_rfc4106_tmpl = {
	.name = "rfc4106",
	.alloc = crypto_rfc4106_alloc,
	.free = crypto_rfc4106_free,
	.module = THIS_MODULE,
};
1060 | 1060 | ||
/*
 * Return the rfc4543 private request context, re-aligned to the tfm's
 * alignmask (the raw aead_request_ctx() area is only ctx-aligned).
 */
static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx(
	struct aead_request *req)
{
	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));

	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
1068 | 1068 | ||
/*
 * rfc4543 key layout mirrors rfc4106: the trailing 4 bytes are the
 * implicit nonce (salt); the remainder goes to the inner GCM tfm.
 * Returns 0 or the inner setkey's error code.
 */
static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;
	int err;

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	memcpy(ctx->nonce, key + keylen, 4);

	/* Forward the request flags down, then the result flags back up. */
	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
				     CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(child, key, keylen);
	crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
				      CRYPTO_TFM_RES_MASK);

	return err;
}
1091 | 1091 | ||
1092 | static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, | 1092 | static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, |
1093 | unsigned int authsize) | 1093 | unsigned int authsize) |
1094 | { | 1094 | { |
1095 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); | 1095 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); |
1096 | 1096 | ||
1097 | if (authsize != 16) | 1097 | if (authsize != 16) |
1098 | return -EINVAL; | 1098 | return -EINVAL; |
1099 | 1099 | ||
1100 | return crypto_aead_setauthsize(ctx->child, authsize); | 1100 | return crypto_aead_setauthsize(ctx->child, authsize); |
1101 | } | 1101 | } |
1102 | 1102 | ||
/*
 * Async completion for rfc4543 encryption: on success, copy the computed
 * auth tag out of the private buffer into the destination scatterlist
 * right after the ciphertext, then complete the outer request.
 */
static void crypto_rfc4543_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);

	if (!err) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst,
					 req->cryptlen,
					 crypto_aead_authsize(aead), 1);
	}

	aead_request_complete(req, err);
}
1117 | 1117 | ||
/*
 * Build the subrequest that performs the actual GCM operation for an
 * RFC 4543 (GMAC) request.
 *
 * RFC 4543 authenticates everything and encrypts nothing, so the child
 * request is constructed with the whole payload chained into the
 * associated data and only the authentication tag as the crypt buffer.
 *
 * @req: the outer rfc4543 AEAD request
 * @enc: true for encryption (tag generation), false for decryption
 *       (tag verification)
 *
 * Returns the initialised subrequest embedded in the request context;
 * the caller passes it to crypto_aead_encrypt()/decrypt().
 */
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
						 bool enc)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct scatterlist *src = req->src;
	struct scatterlist *cipher = rctx->cipher;
	struct scatterlist *payload = rctx->payload;
	struct scatterlist *assoc = rctx->assoc;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int assoclen = req->assoclen;
	struct page *srcp;
	u8 *vsrc;
	/* IV buffer sits after the child's reqctx, at the child's alignment
	 * (space reserved in crypto_rfc4543_init_tfm()). */
	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
			   crypto_aead_alignmask(ctx->child) + 1);

	/* Full IV: 4-byte salt captured at setkey time, then the 8-byte
	 * explicit IV carried in the request. */
	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	/* construct cipher/plaintext */
	if (enc)
		/* Encrypt: tag is generated into auth_tag; start zeroed. */
		memset(rctx->auth_tag, 0, authsize);
	else
		/* Decrypt: pull the received tag from the tail of src. */
		scatterwalk_map_and_copy(rctx->auth_tag, src,
					 req->cryptlen - authsize,
					 authsize, 0);

	/* The tag is the only data the child actually en/decrypts. */
	sg_init_one(cipher, rctx->auth_tag, authsize);

	/* construct the aad */
	srcp = sg_page(src);
	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;

	/* payload = explicit IV || plaintext; chaining may merge the two
	 * entries when src directly follows the IV in memory. */
	sg_init_table(payload, 2);
	sg_set_buf(payload, req->iv, 8);
	scatterwalk_crypto_chain(payload, src, vsrc == req->iv + 8, 2);
	/* AAD covers assoc, IV and payload (minus the tag on decrypt). */
	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

	if (req->assoc->length == req->assoclen) {
		sg_init_table(assoc, 2);
		sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
			    req->assoc->offset);
	} else {
		/* Assoc data spans multiple entries: linearise it into the
		 * fixed-size bounce buffer. */
		BUG_ON(req->assoclen > sizeof(rctx->assocbuf));

		scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
					 req->assoclen, 0);

		sg_init_table(assoc, 2);
		sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
	}
	scatterwalk_crypto_chain(assoc, payload, 0, 2);

	aead_request_set_tfm(subreq, ctx->child);
	aead_request_set_callback(subreq, req->base.flags, crypto_rfc4543_done,
				  req);
	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
	aead_request_set_assoc(subreq, assoc, assoclen);

	return subreq;
}
1181 | 1181 | ||
1182 | static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) | 1182 | static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) |
1183 | { | 1183 | { |
1184 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1184 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
1185 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); | 1185 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); |
1186 | unsigned int authsize = crypto_aead_authsize(aead); | 1186 | unsigned int authsize = crypto_aead_authsize(aead); |
1187 | unsigned int nbytes = req->cryptlen - (enc ? 0 : authsize); | 1187 | unsigned int nbytes = req->cryptlen - (enc ? 0 : authsize); |
1188 | struct blkcipher_desc desc = { | 1188 | struct blkcipher_desc desc = { |
1189 | .tfm = ctx->null, | 1189 | .tfm = ctx->null, |
1190 | }; | 1190 | }; |
1191 | 1191 | ||
1192 | return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes); | 1192 | return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes); |
1193 | } | 1193 | } |
1194 | 1194 | ||
/*
 * RFC 4543 (GMAC) encryption: data is authenticated but not encrypted.
 *
 * For out-of-place requests the plaintext is first copied unchanged to
 * dst via the null cipher; the child AEAD then only produces the tag,
 * which is written to dst at offset cryptlen.
 */
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq;
	int err;

	if (req->src != req->dst) {
		err = crypto_rfc4543_copy_src_to_dst(req, true);
		if (err)
			return err;
	}

	subreq = crypto_rfc4543_crypt(req, true);
	err = crypto_aead_encrypt(subreq);
	if (err)
		/* Nonzero includes -EINPROGRESS from an async child; the
		 * subrequest callback (crypto_rfc4543_done, not visible in
		 * this chunk) presumably completes the tag copy then --
		 * confirm against the full file. */
		return err;

	/* Synchronous completion: append the tag after the payload. */
	scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
1218 | 1218 | ||
1219 | static int crypto_rfc4543_decrypt(struct aead_request *req) | 1219 | static int crypto_rfc4543_decrypt(struct aead_request *req) |
1220 | { | 1220 | { |
1221 | int err; | 1221 | int err; |
1222 | 1222 | ||
1223 | if (req->src != req->dst) { | 1223 | if (req->src != req->dst) { |
1224 | err = crypto_rfc4543_copy_src_to_dst(req, false); | 1224 | err = crypto_rfc4543_copy_src_to_dst(req, false); |
1225 | if (err) | 1225 | if (err) |
1226 | return err; | 1226 | return err; |
1227 | } | 1227 | } |
1228 | 1228 | ||
1229 | req = crypto_rfc4543_crypt(req, false); | 1229 | req = crypto_rfc4543_crypt(req, false); |
1230 | 1230 | ||
1231 | return crypto_aead_decrypt(req); | 1231 | return crypto_aead_decrypt(req); |
1232 | } | 1232 | } |
1233 | 1233 | ||
/*
 * Per-tfm setup: instantiate the child AEAD and the null blkcipher, and
 * size the request context so each request has room for our reqctx, the
 * child's reqctx (aligned to the tfm context alignment) and a 16-byte
 * IV at the child's alignment.
 *
 * Returns 0 on success or a negative errno; the child AEAD is freed on
 * the error path.
 */
static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_rfc4543_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead;
	struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *aead;
	struct crypto_blkcipher *null;
	unsigned long align;
	int err = 0;

	aead = crypto_spawn_aead(spawn);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	null = crypto_spawn_blkcipher(&ictx->null.base);
	err = PTR_ERR(null);
	if (IS_ERR(null))
		goto err_free_aead;

	ctx->child = aead;
	ctx->null = null;

	/* Slack for aligning the IV to the child's alignmask. */
	align = crypto_aead_alignmask(aead);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
				ALIGN(crypto_aead_reqsize(aead),
				      crypto_tfm_ctx_alignment()) +
				align + 16;

	return 0;

err_free_aead:
	crypto_free_aead(aead);
	return err;
}
1270 | 1270 | ||
1271 | static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm) | 1271 | static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm) |
1272 | { | 1272 | { |
1273 | struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); | 1273 | struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); |
1274 | 1274 | ||
1275 | crypto_free_aead(ctx->child); | 1275 | crypto_free_aead(ctx->child); |
1276 | crypto_free_blkcipher(ctx->null); | 1276 | crypto_free_blkcipher(ctx->null); |
1277 | } | 1277 | } |
1278 | 1278 | ||
/*
 * Instantiate an "rfc4543(...)" template, e.g. rfc4543(gcm(aes)).
 *
 * Grabs the named child AEAD plus an ecb(cipher_null) skcipher (used by
 * crypto_rfc4543_copy_src_to_dst() for out-of-place requests) and builds
 * an AEAD instance with an 8-byte explicit IV and a 16-byte maximum tag.
 *
 * Returns the new instance or an ERR_PTR() on failure; errors unwind the
 * grabbed spawns via the goto chain.
 */
static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct crypto_alg *alg;
	struct crypto_rfc4543_instance_ctx *ctx;
	const char *ccm_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	/* Caller must be asking for an AEAD. */
	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
		return ERR_PTR(-EINVAL);

	ccm_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(ccm_name))
		return ERR_CAST(ccm_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	ctx = crypto_instance_ctx(inst);
	spawn = &ctx->aead;
	crypto_set_aead_spawn(spawn, inst);
	err = crypto_grab_aead(spawn, ccm_name, 0,
			       crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	alg = crypto_aead_spawn_alg(spawn);

	/* Null cipher used to copy src to dst for out-of-place requests. */
	crypto_set_skcipher_spawn(&ctx->null, inst);
	err = crypto_grab_skcipher(&ctx->null, "ecb(cipher_null)", 0,
				   CRYPTO_ALG_ASYNC);
	if (err)
		goto out_drop_alg;

	crypto_skcipher_spawn_alg(&ctx->null);

	err = -EINVAL;

	/* The underlying AEAD must take a full 16-byte IV. */
	if (alg->cra_aead.ivsize != 16)
		goto out_drop_ecbnull;

	/* And behave as a stream cipher (blocksize 1). */
	if (alg->cra_blocksize != 1)
		goto out_drop_ecbnull;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4543(%s)", alg->cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_drop_ecbnull;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_nivaead_type;

	/* Callers supply only the 8-byte explicit IV. */
	inst->alg.cra_aead.ivsize = 8;
	inst->alg.cra_aead.maxauthsize = 16;

	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);

	inst->alg.cra_init = crypto_rfc4543_init_tfm;
	inst->alg.cra_exit = crypto_rfc4543_exit_tfm;

	inst->alg.cra_aead.setkey = crypto_rfc4543_setkey;
	inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize;
	inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt;
	inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt;

	inst->alg.cra_aead.geniv = "seqiv";

out:
	return inst;

out_drop_ecbnull:
	crypto_drop_skcipher(&ctx->null);
out_drop_alg:
	crypto_drop_aead(spawn);
out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
1374 | 1374 | ||
1375 | static void crypto_rfc4543_free(struct crypto_instance *inst) | 1375 | static void crypto_rfc4543_free(struct crypto_instance *inst) |
1376 | { | 1376 | { |
1377 | struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst); | 1377 | struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst); |
1378 | 1378 | ||
1379 | crypto_drop_aead(&ctx->aead); | 1379 | crypto_drop_aead(&ctx->aead); |
1380 | crypto_drop_skcipher(&ctx->null); | 1380 | crypto_drop_skcipher(&ctx->null); |
1381 | 1381 | ||
1382 | kfree(inst); | 1382 | kfree(inst); |
1383 | } | 1383 | } |
1384 | 1384 | ||
/* Template for rfc4543(...) instances, e.g. rfc4543(gcm(aes)) (GMAC). */
static struct crypto_template crypto_rfc4543_tmpl = {
	.name = "rfc4543",
	.alloc = crypto_rfc4543_alloc,
	.free = crypto_rfc4543_free,
	.module = THIS_MODULE,
};
1391 | 1391 | ||
/*
 * Register the gcm_base, gcm, rfc4106 and rfc4543 templates, undoing
 * earlier registrations in reverse order if any of them fails.
 *
 * gcm_zeroes is a shared 16-byte all-zero buffer allocated for the GCM
 * implementation; it is freed on any error path and at module exit.
 */
static int __init crypto_gcm_module_init(void)
{
	int err;

	gcm_zeroes = kzalloc(16, GFP_KERNEL);
	if (!gcm_zeroes)
		return -ENOMEM;

	err = crypto_register_template(&crypto_gcm_base_tmpl);
	if (err)
		goto out;

	err = crypto_register_template(&crypto_gcm_tmpl);
	if (err)
		goto out_undo_base;

	err = crypto_register_template(&crypto_rfc4106_tmpl);
	if (err)
		goto out_undo_gcm;

	err = crypto_register_template(&crypto_rfc4543_tmpl);
	if (err)
		goto out_undo_rfc4106;

	return 0;

out_undo_rfc4106:
	crypto_unregister_template(&crypto_rfc4106_tmpl);
out_undo_gcm:
	crypto_unregister_template(&crypto_gcm_tmpl);
out_undo_base:
	crypto_unregister_template(&crypto_gcm_base_tmpl);
out:
	kfree(gcm_zeroes);
	return err;
}
1428 | 1428 | ||
/* Tear down in reverse order of registration. */
static void __exit crypto_gcm_module_exit(void)
{
	/* NOTE(review): gcm_zeroes is freed before the templates are
	 * unregistered; this relies on no live instance touching it past
	 * module exit -- confirm against the template users. */
	kfree(gcm_zeroes);
	crypto_unregister_template(&crypto_rfc4543_tmpl);
	crypto_unregister_template(&crypto_rfc4106_tmpl);
	crypto_unregister_template(&crypto_gcm_tmpl);
	crypto_unregister_template(&crypto_gcm_base_tmpl);
}
1437 | 1437 | ||
module_init(crypto_gcm_module_init);
module_exit(crypto_gcm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Galois/Counter Mode");
MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
/* Aliases so "crypto-<name>" template lookups resolve to this module. */
MODULE_ALIAS_CRYPTO("gcm_base");
MODULE_ALIAS_CRYPTO("rfc4106");
MODULE_ALIAS_CRYPTO("rfc4543");
MODULE_ALIAS_CRYPTO("gcm");
1447 | 1448 |
crypto/hmac.c
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * HMAC: Keyed-Hashing for Message Authentication (RFC2104). | 4 | * HMAC: Keyed-Hashing for Message Authentication (RFC2104). |
5 | * | 5 | * |
6 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 6 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> |
7 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
8 | * | 8 | * |
9 | * The HMAC implementation is derived from USAGI. | 9 | * The HMAC implementation is derived from USAGI. |
10 | * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI | 10 | * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify it | 12 | * This program is free software; you can redistribute it and/or modify it |
13 | * under the terms of the GNU General Public License as published by the Free | 13 | * under the terms of the GNU General Public License as published by the Free |
14 | * Software Foundation; either version 2 of the License, or (at your option) | 14 | * Software Foundation; either version 2 of the License, or (at your option) |
15 | * any later version. | 15 | * any later version. |
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <crypto/internal/hash.h> | 19 | #include <crypto/internal/hash.h> |
20 | #include <crypto/scatterwalk.h> | 20 | #include <crypto/scatterwalk.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | 27 | ||
/* Per-tfm HMAC context: the child (unkeyed) hash transform. */
struct hmac_ctx {
	struct crypto_shash *hash;
};
31 | 31 | ||
/* Round p up to the next multiple of align (a power of two). */
static inline void *align_ptr(void *p, unsigned int align)
{
	unsigned long addr = (unsigned long)p;

	return (void *)ALIGN(addr, align);
}
36 | 36 | ||
/*
 * Locate the struct hmac_ctx inside the tfm context area.
 *
 * The context is laid out as the exported ipad state followed by the
 * exported opad state (statesize bytes each, see hmac_setkey()), with
 * the hmac_ctx after them at the tfm context alignment.
 */
static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
{
	return align_ptr(crypto_shash_ctx_aligned(tfm) +
			 crypto_shash_statesize(tfm) * 2,
			 crypto_tfm_ctx_alignment());
}
43 | 43 | ||
/*
 * hmac_setkey - precompute the inner and outer partial hash states
 *
 * Per RFC 2104: a key longer than the block size is first reduced by
 * digesting it; the (zero-padded) key is then XORed with the 0x36
 * (ipad) and 0x5c (opad) patterns, each padded block is hashed, and the
 * two resulting partial states are exported into the context area so
 * hmac_init()/hmac_final() can simply import them.
 *
 * Returns 0 on success or the first failing child-hash error.
 */
static int hmac_setkey(struct crypto_shash *parent,
		       const u8 *inkey, unsigned int keylen)
{
	int bs = crypto_shash_blocksize(parent);
	int ds = crypto_shash_digestsize(parent);
	int ss = crypto_shash_statesize(parent);
	char *ipad = crypto_shash_ctx_aligned(parent);
	char *opad = ipad + ss;
	struct hmac_ctx *ctx = align_ptr(opad + ss,
					 crypto_tfm_ctx_alignment());
	struct crypto_shash *hash = ctx->hash;
	SHASH_DESC_ON_STACK(shash, hash);
	unsigned int i;

	shash->tfm = hash;
	shash->flags = crypto_shash_get_flags(parent)
	    & CRYPTO_TFM_REQ_MAY_SLEEP;

	if (keylen > bs) {
		int err;

		/* Oversized key: use its digest as the effective key. */
		err = crypto_shash_digest(shash, inkey, keylen, ipad);
		if (err)
			return err;

		keylen = ds;
	} else
		memcpy(ipad, inkey, keylen);

	/* Zero-pad the key to a full block, then derive both pads. */
	memset(ipad + keylen, 0, bs - keylen);
	memcpy(opad, ipad, bs);

	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	/* Hash one padded block for each pad and export the partial
	 * states in place of the pad buffers. */
	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, ipad, bs) ?:
	       crypto_shash_export(shash, ipad) ?:
	       crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, opad, bs) ?:
	       crypto_shash_export(shash, opad);
}
88 | 88 | ||
89 | static int hmac_export(struct shash_desc *pdesc, void *out) | 89 | static int hmac_export(struct shash_desc *pdesc, void *out) |
90 | { | 90 | { |
91 | struct shash_desc *desc = shash_desc_ctx(pdesc); | 91 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
92 | 92 | ||
93 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 93 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
94 | 94 | ||
95 | return crypto_shash_export(desc, out); | 95 | return crypto_shash_export(desc, out); |
96 | } | 96 | } |
97 | 97 | ||
98 | static int hmac_import(struct shash_desc *pdesc, const void *in) | 98 | static int hmac_import(struct shash_desc *pdesc, const void *in) |
99 | { | 99 | { |
100 | struct shash_desc *desc = shash_desc_ctx(pdesc); | 100 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
101 | struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); | 101 | struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); |
102 | 102 | ||
103 | desc->tfm = ctx->hash; | 103 | desc->tfm = ctx->hash; |
104 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 104 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
105 | 105 | ||
106 | return crypto_shash_import(desc, in); | 106 | return crypto_shash_import(desc, in); |
107 | } | 107 | } |
108 | 108 | ||
/* Start a new digest by importing the precomputed ipad state, which is
 * the first exported state in the context area (see hmac_setkey()). */
static int hmac_init(struct shash_desc *pdesc)
{
	return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
}
113 | 113 | ||
114 | static int hmac_update(struct shash_desc *pdesc, | 114 | static int hmac_update(struct shash_desc *pdesc, |
115 | const u8 *data, unsigned int nbytes) | 115 | const u8 *data, unsigned int nbytes) |
116 | { | 116 | { |
117 | struct shash_desc *desc = shash_desc_ctx(pdesc); | 117 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
118 | 118 | ||
119 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 119 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
120 | 120 | ||
121 | return crypto_shash_update(desc, data, nbytes); | 121 | return crypto_shash_update(desc, data, nbytes); |
122 | } | 122 | } |
123 | 123 | ||
/*
 * hmac_final - finish the inner hash, then apply the outer hash
 *
 * Finalises the inner digest into out, imports the precomputed opad
 * state and hashes the ds-byte inner digest with it, writing the final
 * HMAC value over out.  The ?: chain stops at the first error.
 */
static int hmac_final(struct shash_desc *pdesc, u8 *out)
{
	struct crypto_shash *parent = pdesc->tfm;
	int ds = crypto_shash_digestsize(parent);
	int ss = crypto_shash_statesize(parent);
	char *opad = crypto_shash_ctx_aligned(parent) + ss;
	struct shash_desc *desc = shash_desc_ctx(pdesc);

	desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_final(desc, out) ?:
	       crypto_shash_import(desc, opad) ?:
	       crypto_shash_finup(desc, out, ds, out);
}
138 | 138 | ||
139 | static int hmac_finup(struct shash_desc *pdesc, const u8 *data, | 139 | static int hmac_finup(struct shash_desc *pdesc, const u8 *data, |
140 | unsigned int nbytes, u8 *out) | 140 | unsigned int nbytes, u8 *out) |
141 | { | 141 | { |
142 | 142 | ||
143 | struct crypto_shash *parent = pdesc->tfm; | 143 | struct crypto_shash *parent = pdesc->tfm; |
144 | int ds = crypto_shash_digestsize(parent); | 144 | int ds = crypto_shash_digestsize(parent); |
145 | int ss = crypto_shash_statesize(parent); | 145 | int ss = crypto_shash_statesize(parent); |
146 | char *opad = crypto_shash_ctx_aligned(parent) + ss; | 146 | char *opad = crypto_shash_ctx_aligned(parent) + ss; |
147 | struct shash_desc *desc = shash_desc_ctx(pdesc); | 147 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
148 | 148 | ||
149 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 149 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
150 | 150 | ||
151 | return crypto_shash_finup(desc, data, nbytes, out) ?: | 151 | return crypto_shash_finup(desc, data, nbytes, out) ?: |
152 | crypto_shash_import(desc, opad) ?: | 152 | crypto_shash_import(desc, opad) ?: |
153 | crypto_shash_finup(desc, out, ds, out); | 153 | crypto_shash_finup(desc, out, ds, out); |
154 | } | 154 | } |
155 | 155 | ||
/*
 * Per-tfm setup: instantiate the child hash from the instance spawn and
 * size our descriptor to hold a shash_desc plus the child's descriptor.
 *
 * Returns 0 on success or the spawn error.
 */
static int hmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_shash *parent = __crypto_shash_cast(tfm);
	struct crypto_shash *hash;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst);
	struct hmac_ctx *ctx = hmac_ctx(parent);

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	parent->descsize = sizeof(struct shash_desc) +
			   crypto_shash_descsize(hash);

	ctx->hash = hash;
	return 0;
}
174 | 174 | ||
175 | static void hmac_exit_tfm(struct crypto_tfm *tfm) | 175 | static void hmac_exit_tfm(struct crypto_tfm *tfm) |
176 | { | 176 | { |
177 | struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); | 177 | struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); |
178 | crypto_free_shash(ctx->hash); | 178 | crypto_free_shash(ctx->hash); |
179 | } | 179 | } |
180 | 180 | ||
181 | static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) | 181 | static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) |
182 | { | 182 | { |
183 | struct shash_instance *inst; | 183 | struct shash_instance *inst; |
184 | struct crypto_alg *alg; | 184 | struct crypto_alg *alg; |
185 | struct shash_alg *salg; | 185 | struct shash_alg *salg; |
186 | int err; | 186 | int err; |
187 | int ds; | 187 | int ds; |
188 | int ss; | 188 | int ss; |
189 | 189 | ||
190 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); | 190 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
191 | if (err) | 191 | if (err) |
192 | return err; | 192 | return err; |
193 | 193 | ||
194 | salg = shash_attr_alg(tb[1], 0, 0); | 194 | salg = shash_attr_alg(tb[1], 0, 0); |
195 | if (IS_ERR(salg)) | 195 | if (IS_ERR(salg)) |
196 | return PTR_ERR(salg); | 196 | return PTR_ERR(salg); |
197 | 197 | ||
198 | err = -EINVAL; | 198 | err = -EINVAL; |
199 | ds = salg->digestsize; | 199 | ds = salg->digestsize; |
200 | ss = salg->statesize; | 200 | ss = salg->statesize; |
201 | alg = &salg->base; | 201 | alg = &salg->base; |
202 | if (ds > alg->cra_blocksize || | 202 | if (ds > alg->cra_blocksize || |
203 | ss < alg->cra_blocksize) | 203 | ss < alg->cra_blocksize) |
204 | goto out_put_alg; | 204 | goto out_put_alg; |
205 | 205 | ||
206 | inst = shash_alloc_instance("hmac", alg); | 206 | inst = shash_alloc_instance("hmac", alg); |
207 | err = PTR_ERR(inst); | 207 | err = PTR_ERR(inst); |
208 | if (IS_ERR(inst)) | 208 | if (IS_ERR(inst)) |
209 | goto out_put_alg; | 209 | goto out_put_alg; |
210 | 210 | ||
211 | err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, | 211 | err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, |
212 | shash_crypto_instance(inst)); | 212 | shash_crypto_instance(inst)); |
213 | if (err) | 213 | if (err) |
214 | goto out_free_inst; | 214 | goto out_free_inst; |
215 | 215 | ||
216 | inst->alg.base.cra_priority = alg->cra_priority; | 216 | inst->alg.base.cra_priority = alg->cra_priority; |
217 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | 217 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
218 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | 218 | inst->alg.base.cra_alignmask = alg->cra_alignmask; |
219 | 219 | ||
220 | ss = ALIGN(ss, alg->cra_alignmask + 1); | 220 | ss = ALIGN(ss, alg->cra_alignmask + 1); |
221 | inst->alg.digestsize = ds; | 221 | inst->alg.digestsize = ds; |
222 | inst->alg.statesize = ss; | 222 | inst->alg.statesize = ss; |
223 | 223 | ||
224 | inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + | 224 | inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + |
225 | ALIGN(ss * 2, crypto_tfm_ctx_alignment()); | 225 | ALIGN(ss * 2, crypto_tfm_ctx_alignment()); |
226 | 226 | ||
227 | inst->alg.base.cra_init = hmac_init_tfm; | 227 | inst->alg.base.cra_init = hmac_init_tfm; |
228 | inst->alg.base.cra_exit = hmac_exit_tfm; | 228 | inst->alg.base.cra_exit = hmac_exit_tfm; |
229 | 229 | ||
230 | inst->alg.init = hmac_init; | 230 | inst->alg.init = hmac_init; |
231 | inst->alg.update = hmac_update; | 231 | inst->alg.update = hmac_update; |
232 | inst->alg.final = hmac_final; | 232 | inst->alg.final = hmac_final; |
233 | inst->alg.finup = hmac_finup; | 233 | inst->alg.finup = hmac_finup; |
234 | inst->alg.export = hmac_export; | 234 | inst->alg.export = hmac_export; |
235 | inst->alg.import = hmac_import; | 235 | inst->alg.import = hmac_import; |
236 | inst->alg.setkey = hmac_setkey; | 236 | inst->alg.setkey = hmac_setkey; |
237 | 237 | ||
238 | err = shash_register_instance(tmpl, inst); | 238 | err = shash_register_instance(tmpl, inst); |
239 | if (err) { | 239 | if (err) { |
240 | out_free_inst: | 240 | out_free_inst: |
241 | shash_free_instance(shash_crypto_instance(inst)); | 241 | shash_free_instance(shash_crypto_instance(inst)); |
242 | } | 242 | } |
243 | 243 | ||
244 | out_put_alg: | 244 | out_put_alg: |
245 | crypto_mod_put(alg); | 245 | crypto_mod_put(alg); |
246 | return err; | 246 | return err; |
247 | } | 247 | } |
248 | 248 | ||
249 | static struct crypto_template hmac_tmpl = { | 249 | static struct crypto_template hmac_tmpl = { |
250 | .name = "hmac", | 250 | .name = "hmac", |
251 | .create = hmac_create, | 251 | .create = hmac_create, |
252 | .free = shash_free_instance, | 252 | .free = shash_free_instance, |
253 | .module = THIS_MODULE, | 253 | .module = THIS_MODULE, |
254 | }; | 254 | }; |
255 | 255 | ||
256 | static int __init hmac_module_init(void) | 256 | static int __init hmac_module_init(void) |
257 | { | 257 | { |
258 | return crypto_register_template(&hmac_tmpl); | 258 | return crypto_register_template(&hmac_tmpl); |
259 | } | 259 | } |
260 | 260 | ||
261 | static void __exit hmac_module_exit(void) | 261 | static void __exit hmac_module_exit(void) |
262 | { | 262 | { |
263 | crypto_unregister_template(&hmac_tmpl); | 263 | crypto_unregister_template(&hmac_tmpl); |
264 | } | 264 | } |
265 | 265 | ||
266 | module_init(hmac_module_init); | 266 | module_init(hmac_module_init); |
267 | module_exit(hmac_module_exit); | 267 | module_exit(hmac_module_exit); |
268 | 268 | ||
269 | MODULE_LICENSE("GPL"); | 269 | MODULE_LICENSE("GPL"); |
270 | MODULE_DESCRIPTION("HMAC hash algorithm"); | 270 | MODULE_DESCRIPTION("HMAC hash algorithm"); |
271 | MODULE_ALIAS_CRYPTO("hmac"); | ||
271 | 272 |
crypto/lrw.c
1 | /* LRW: as defined by Cyril Guyot in | 1 | /* LRW: as defined by Cyril Guyot in |
2 | * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf | 2 | * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org> | 4 | * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org> |
5 | * | 5 | * |
6 | * Based on ecb.c | 6 | * Based on ecb.c |
7 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free |
11 | * Software Foundation; either version 2 of the License, or (at your option) | 11 | * Software Foundation; either version 2 of the License, or (at your option) |
12 | * any later version. | 12 | * any later version. |
13 | */ | 13 | */ |
14 | /* This implementation is checked against the test vectors in the above | 14 | /* This implementation is checked against the test vectors in the above |
15 | * document and by a test vector provided by Ken Buchanan at | 15 | * document and by a test vector provided by Ken Buchanan at |
16 | * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html | 16 | * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html |
17 | * | 17 | * |
18 | * The test vectors are included in the testing module tcrypt.[ch] */ | 18 | * The test vectors are included in the testing module tcrypt.[ch] */ |
19 | 19 | ||
20 | #include <crypto/algapi.h> | 20 | #include <crypto/algapi.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | 27 | ||
28 | #include <crypto/b128ops.h> | 28 | #include <crypto/b128ops.h> |
29 | #include <crypto/gf128mul.h> | 29 | #include <crypto/gf128mul.h> |
30 | #include <crypto/lrw.h> | 30 | #include <crypto/lrw.h> |
31 | 31 | ||
32 | struct priv { | 32 | struct priv { |
33 | struct crypto_cipher *child; | 33 | struct crypto_cipher *child; |
34 | struct lrw_table_ctx table; | 34 | struct lrw_table_ctx table; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static inline void setbit128_bbe(void *b, int bit) | 37 | static inline void setbit128_bbe(void *b, int bit) |
38 | { | 38 | { |
39 | __set_bit(bit ^ (0x80 - | 39 | __set_bit(bit ^ (0x80 - |
40 | #ifdef __BIG_ENDIAN | 40 | #ifdef __BIG_ENDIAN |
41 | BITS_PER_LONG | 41 | BITS_PER_LONG |
42 | #else | 42 | #else |
43 | BITS_PER_BYTE | 43 | BITS_PER_BYTE |
44 | #endif | 44 | #endif |
45 | ), b); | 45 | ), b); |
46 | } | 46 | } |
47 | 47 | ||
48 | int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak) | 48 | int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak) |
49 | { | 49 | { |
50 | be128 tmp = { 0 }; | 50 | be128 tmp = { 0 }; |
51 | int i; | 51 | int i; |
52 | 52 | ||
53 | if (ctx->table) | 53 | if (ctx->table) |
54 | gf128mul_free_64k(ctx->table); | 54 | gf128mul_free_64k(ctx->table); |
55 | 55 | ||
56 | /* initialize multiplication table for Key2 */ | 56 | /* initialize multiplication table for Key2 */ |
57 | ctx->table = gf128mul_init_64k_bbe((be128 *)tweak); | 57 | ctx->table = gf128mul_init_64k_bbe((be128 *)tweak); |
58 | if (!ctx->table) | 58 | if (!ctx->table) |
59 | return -ENOMEM; | 59 | return -ENOMEM; |
60 | 60 | ||
61 | /* initialize optimization table */ | 61 | /* initialize optimization table */ |
62 | for (i = 0; i < 128; i++) { | 62 | for (i = 0; i < 128; i++) { |
63 | setbit128_bbe(&tmp, i); | 63 | setbit128_bbe(&tmp, i); |
64 | ctx->mulinc[i] = tmp; | 64 | ctx->mulinc[i] = tmp; |
65 | gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); | 65 | gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); |
66 | } | 66 | } |
67 | 67 | ||
68 | return 0; | 68 | return 0; |
69 | } | 69 | } |
70 | EXPORT_SYMBOL_GPL(lrw_init_table); | 70 | EXPORT_SYMBOL_GPL(lrw_init_table); |
71 | 71 | ||
72 | void lrw_free_table(struct lrw_table_ctx *ctx) | 72 | void lrw_free_table(struct lrw_table_ctx *ctx) |
73 | { | 73 | { |
74 | if (ctx->table) | 74 | if (ctx->table) |
75 | gf128mul_free_64k(ctx->table); | 75 | gf128mul_free_64k(ctx->table); |
76 | } | 76 | } |
77 | EXPORT_SYMBOL_GPL(lrw_free_table); | 77 | EXPORT_SYMBOL_GPL(lrw_free_table); |
78 | 78 | ||
79 | static int setkey(struct crypto_tfm *parent, const u8 *key, | 79 | static int setkey(struct crypto_tfm *parent, const u8 *key, |
80 | unsigned int keylen) | 80 | unsigned int keylen) |
81 | { | 81 | { |
82 | struct priv *ctx = crypto_tfm_ctx(parent); | 82 | struct priv *ctx = crypto_tfm_ctx(parent); |
83 | struct crypto_cipher *child = ctx->child; | 83 | struct crypto_cipher *child = ctx->child; |
84 | int err, bsize = LRW_BLOCK_SIZE; | 84 | int err, bsize = LRW_BLOCK_SIZE; |
85 | const u8 *tweak = key + keylen - bsize; | 85 | const u8 *tweak = key + keylen - bsize; |
86 | 86 | ||
87 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 87 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
88 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 88 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & |
89 | CRYPTO_TFM_REQ_MASK); | 89 | CRYPTO_TFM_REQ_MASK); |
90 | err = crypto_cipher_setkey(child, key, keylen - bsize); | 90 | err = crypto_cipher_setkey(child, key, keylen - bsize); |
91 | if (err) | 91 | if (err) |
92 | return err; | 92 | return err; |
93 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 93 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & |
94 | CRYPTO_TFM_RES_MASK); | 94 | CRYPTO_TFM_RES_MASK); |
95 | 95 | ||
96 | return lrw_init_table(&ctx->table, tweak); | 96 | return lrw_init_table(&ctx->table, tweak); |
97 | } | 97 | } |
98 | 98 | ||
99 | struct sinfo { | 99 | struct sinfo { |
100 | be128 t; | 100 | be128 t; |
101 | struct crypto_tfm *tfm; | 101 | struct crypto_tfm *tfm; |
102 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *); | 102 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *); |
103 | }; | 103 | }; |
104 | 104 | ||
105 | static inline void inc(be128 *iv) | 105 | static inline void inc(be128 *iv) |
106 | { | 106 | { |
107 | be64_add_cpu(&iv->b, 1); | 107 | be64_add_cpu(&iv->b, 1); |
108 | if (!iv->b) | 108 | if (!iv->b) |
109 | be64_add_cpu(&iv->a, 1); | 109 | be64_add_cpu(&iv->a, 1); |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline void lrw_round(struct sinfo *s, void *dst, const void *src) | 112 | static inline void lrw_round(struct sinfo *s, void *dst, const void *src) |
113 | { | 113 | { |
114 | be128_xor(dst, &s->t, src); /* PP <- T xor P */ | 114 | be128_xor(dst, &s->t, src); /* PP <- T xor P */ |
115 | s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */ | 115 | s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */ |
116 | be128_xor(dst, dst, &s->t); /* C <- T xor CC */ | 116 | be128_xor(dst, dst, &s->t); /* C <- T xor CC */ |
117 | } | 117 | } |
118 | 118 | ||
119 | /* this returns the number of consequative 1 bits starting | 119 | /* this returns the number of consequative 1 bits starting |
120 | * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ | 120 | * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ |
121 | static inline int get_index128(be128 *block) | 121 | static inline int get_index128(be128 *block) |
122 | { | 122 | { |
123 | int x; | 123 | int x; |
124 | __be32 *p = (__be32 *) block; | 124 | __be32 *p = (__be32 *) block; |
125 | 125 | ||
126 | for (p += 3, x = 0; x < 128; p--, x += 32) { | 126 | for (p += 3, x = 0; x < 128; p--, x += 32) { |
127 | u32 val = be32_to_cpup(p); | 127 | u32 val = be32_to_cpup(p); |
128 | 128 | ||
129 | if (!~val) | 129 | if (!~val) |
130 | continue; | 130 | continue; |
131 | 131 | ||
132 | return x + ffz(val); | 132 | return x + ffz(val); |
133 | } | 133 | } |
134 | 134 | ||
135 | return x; | 135 | return x; |
136 | } | 136 | } |
137 | 137 | ||
138 | static int crypt(struct blkcipher_desc *d, | 138 | static int crypt(struct blkcipher_desc *d, |
139 | struct blkcipher_walk *w, struct priv *ctx, | 139 | struct blkcipher_walk *w, struct priv *ctx, |
140 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) | 140 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) |
141 | { | 141 | { |
142 | int err; | 142 | int err; |
143 | unsigned int avail; | 143 | unsigned int avail; |
144 | const int bs = LRW_BLOCK_SIZE; | 144 | const int bs = LRW_BLOCK_SIZE; |
145 | struct sinfo s = { | 145 | struct sinfo s = { |
146 | .tfm = crypto_cipher_tfm(ctx->child), | 146 | .tfm = crypto_cipher_tfm(ctx->child), |
147 | .fn = fn | 147 | .fn = fn |
148 | }; | 148 | }; |
149 | be128 *iv; | 149 | be128 *iv; |
150 | u8 *wsrc; | 150 | u8 *wsrc; |
151 | u8 *wdst; | 151 | u8 *wdst; |
152 | 152 | ||
153 | err = blkcipher_walk_virt(d, w); | 153 | err = blkcipher_walk_virt(d, w); |
154 | if (!(avail = w->nbytes)) | 154 | if (!(avail = w->nbytes)) |
155 | return err; | 155 | return err; |
156 | 156 | ||
157 | wsrc = w->src.virt.addr; | 157 | wsrc = w->src.virt.addr; |
158 | wdst = w->dst.virt.addr; | 158 | wdst = w->dst.virt.addr; |
159 | 159 | ||
160 | /* calculate first value of T */ | 160 | /* calculate first value of T */ |
161 | iv = (be128 *)w->iv; | 161 | iv = (be128 *)w->iv; |
162 | s.t = *iv; | 162 | s.t = *iv; |
163 | 163 | ||
164 | /* T <- I*Key2 */ | 164 | /* T <- I*Key2 */ |
165 | gf128mul_64k_bbe(&s.t, ctx->table.table); | 165 | gf128mul_64k_bbe(&s.t, ctx->table.table); |
166 | 166 | ||
167 | goto first; | 167 | goto first; |
168 | 168 | ||
169 | for (;;) { | 169 | for (;;) { |
170 | do { | 170 | do { |
171 | /* T <- I*Key2, using the optimization | 171 | /* T <- I*Key2, using the optimization |
172 | * discussed in the specification */ | 172 | * discussed in the specification */ |
173 | be128_xor(&s.t, &s.t, | 173 | be128_xor(&s.t, &s.t, |
174 | &ctx->table.mulinc[get_index128(iv)]); | 174 | &ctx->table.mulinc[get_index128(iv)]); |
175 | inc(iv); | 175 | inc(iv); |
176 | 176 | ||
177 | first: | 177 | first: |
178 | lrw_round(&s, wdst, wsrc); | 178 | lrw_round(&s, wdst, wsrc); |
179 | 179 | ||
180 | wsrc += bs; | 180 | wsrc += bs; |
181 | wdst += bs; | 181 | wdst += bs; |
182 | } while ((avail -= bs) >= bs); | 182 | } while ((avail -= bs) >= bs); |
183 | 183 | ||
184 | err = blkcipher_walk_done(d, w, avail); | 184 | err = blkcipher_walk_done(d, w, avail); |
185 | if (!(avail = w->nbytes)) | 185 | if (!(avail = w->nbytes)) |
186 | break; | 186 | break; |
187 | 187 | ||
188 | wsrc = w->src.virt.addr; | 188 | wsrc = w->src.virt.addr; |
189 | wdst = w->dst.virt.addr; | 189 | wdst = w->dst.virt.addr; |
190 | } | 190 | } |
191 | 191 | ||
192 | return err; | 192 | return err; |
193 | } | 193 | } |
194 | 194 | ||
195 | static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 195 | static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
196 | struct scatterlist *src, unsigned int nbytes) | 196 | struct scatterlist *src, unsigned int nbytes) |
197 | { | 197 | { |
198 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 198 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); |
199 | struct blkcipher_walk w; | 199 | struct blkcipher_walk w; |
200 | 200 | ||
201 | blkcipher_walk_init(&w, dst, src, nbytes); | 201 | blkcipher_walk_init(&w, dst, src, nbytes); |
202 | return crypt(desc, &w, ctx, | 202 | return crypt(desc, &w, ctx, |
203 | crypto_cipher_alg(ctx->child)->cia_encrypt); | 203 | crypto_cipher_alg(ctx->child)->cia_encrypt); |
204 | } | 204 | } |
205 | 205 | ||
206 | static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 206 | static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
207 | struct scatterlist *src, unsigned int nbytes) | 207 | struct scatterlist *src, unsigned int nbytes) |
208 | { | 208 | { |
209 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 209 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); |
210 | struct blkcipher_walk w; | 210 | struct blkcipher_walk w; |
211 | 211 | ||
212 | blkcipher_walk_init(&w, dst, src, nbytes); | 212 | blkcipher_walk_init(&w, dst, src, nbytes); |
213 | return crypt(desc, &w, ctx, | 213 | return crypt(desc, &w, ctx, |
214 | crypto_cipher_alg(ctx->child)->cia_decrypt); | 214 | crypto_cipher_alg(ctx->child)->cia_decrypt); |
215 | } | 215 | } |
216 | 216 | ||
217 | int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, | 217 | int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, |
218 | struct scatterlist *ssrc, unsigned int nbytes, | 218 | struct scatterlist *ssrc, unsigned int nbytes, |
219 | struct lrw_crypt_req *req) | 219 | struct lrw_crypt_req *req) |
220 | { | 220 | { |
221 | const unsigned int bsize = LRW_BLOCK_SIZE; | 221 | const unsigned int bsize = LRW_BLOCK_SIZE; |
222 | const unsigned int max_blks = req->tbuflen / bsize; | 222 | const unsigned int max_blks = req->tbuflen / bsize; |
223 | struct lrw_table_ctx *ctx = req->table_ctx; | 223 | struct lrw_table_ctx *ctx = req->table_ctx; |
224 | struct blkcipher_walk walk; | 224 | struct blkcipher_walk walk; |
225 | unsigned int nblocks; | 225 | unsigned int nblocks; |
226 | be128 *iv, *src, *dst, *t; | 226 | be128 *iv, *src, *dst, *t; |
227 | be128 *t_buf = req->tbuf; | 227 | be128 *t_buf = req->tbuf; |
228 | int err, i; | 228 | int err, i; |
229 | 229 | ||
230 | BUG_ON(max_blks < 1); | 230 | BUG_ON(max_blks < 1); |
231 | 231 | ||
232 | blkcipher_walk_init(&walk, sdst, ssrc, nbytes); | 232 | blkcipher_walk_init(&walk, sdst, ssrc, nbytes); |
233 | 233 | ||
234 | err = blkcipher_walk_virt(desc, &walk); | 234 | err = blkcipher_walk_virt(desc, &walk); |
235 | nbytes = walk.nbytes; | 235 | nbytes = walk.nbytes; |
236 | if (!nbytes) | 236 | if (!nbytes) |
237 | return err; | 237 | return err; |
238 | 238 | ||
239 | nblocks = min(walk.nbytes / bsize, max_blks); | 239 | nblocks = min(walk.nbytes / bsize, max_blks); |
240 | src = (be128 *)walk.src.virt.addr; | 240 | src = (be128 *)walk.src.virt.addr; |
241 | dst = (be128 *)walk.dst.virt.addr; | 241 | dst = (be128 *)walk.dst.virt.addr; |
242 | 242 | ||
243 | /* calculate first value of T */ | 243 | /* calculate first value of T */ |
244 | iv = (be128 *)walk.iv; | 244 | iv = (be128 *)walk.iv; |
245 | t_buf[0] = *iv; | 245 | t_buf[0] = *iv; |
246 | 246 | ||
247 | /* T <- I*Key2 */ | 247 | /* T <- I*Key2 */ |
248 | gf128mul_64k_bbe(&t_buf[0], ctx->table); | 248 | gf128mul_64k_bbe(&t_buf[0], ctx->table); |
249 | 249 | ||
250 | i = 0; | 250 | i = 0; |
251 | goto first; | 251 | goto first; |
252 | 252 | ||
253 | for (;;) { | 253 | for (;;) { |
254 | do { | 254 | do { |
255 | for (i = 0; i < nblocks; i++) { | 255 | for (i = 0; i < nblocks; i++) { |
256 | /* T <- I*Key2, using the optimization | 256 | /* T <- I*Key2, using the optimization |
257 | * discussed in the specification */ | 257 | * discussed in the specification */ |
258 | be128_xor(&t_buf[i], t, | 258 | be128_xor(&t_buf[i], t, |
259 | &ctx->mulinc[get_index128(iv)]); | 259 | &ctx->mulinc[get_index128(iv)]); |
260 | inc(iv); | 260 | inc(iv); |
261 | first: | 261 | first: |
262 | t = &t_buf[i]; | 262 | t = &t_buf[i]; |
263 | 263 | ||
264 | /* PP <- T xor P */ | 264 | /* PP <- T xor P */ |
265 | be128_xor(dst + i, t, src + i); | 265 | be128_xor(dst + i, t, src + i); |
266 | } | 266 | } |
267 | 267 | ||
268 | /* CC <- E(Key2,PP) */ | 268 | /* CC <- E(Key2,PP) */ |
269 | req->crypt_fn(req->crypt_ctx, (u8 *)dst, | 269 | req->crypt_fn(req->crypt_ctx, (u8 *)dst, |
270 | nblocks * bsize); | 270 | nblocks * bsize); |
271 | 271 | ||
272 | /* C <- T xor CC */ | 272 | /* C <- T xor CC */ |
273 | for (i = 0; i < nblocks; i++) | 273 | for (i = 0; i < nblocks; i++) |
274 | be128_xor(dst + i, dst + i, &t_buf[i]); | 274 | be128_xor(dst + i, dst + i, &t_buf[i]); |
275 | 275 | ||
276 | src += nblocks; | 276 | src += nblocks; |
277 | dst += nblocks; | 277 | dst += nblocks; |
278 | nbytes -= nblocks * bsize; | 278 | nbytes -= nblocks * bsize; |
279 | nblocks = min(nbytes / bsize, max_blks); | 279 | nblocks = min(nbytes / bsize, max_blks); |
280 | } while (nblocks > 0); | 280 | } while (nblocks > 0); |
281 | 281 | ||
282 | err = blkcipher_walk_done(desc, &walk, nbytes); | 282 | err = blkcipher_walk_done(desc, &walk, nbytes); |
283 | nbytes = walk.nbytes; | 283 | nbytes = walk.nbytes; |
284 | if (!nbytes) | 284 | if (!nbytes) |
285 | break; | 285 | break; |
286 | 286 | ||
287 | nblocks = min(nbytes / bsize, max_blks); | 287 | nblocks = min(nbytes / bsize, max_blks); |
288 | src = (be128 *)walk.src.virt.addr; | 288 | src = (be128 *)walk.src.virt.addr; |
289 | dst = (be128 *)walk.dst.virt.addr; | 289 | dst = (be128 *)walk.dst.virt.addr; |
290 | } | 290 | } |
291 | 291 | ||
292 | return err; | 292 | return err; |
293 | } | 293 | } |
294 | EXPORT_SYMBOL_GPL(lrw_crypt); | 294 | EXPORT_SYMBOL_GPL(lrw_crypt); |
295 | 295 | ||
296 | static int init_tfm(struct crypto_tfm *tfm) | 296 | static int init_tfm(struct crypto_tfm *tfm) |
297 | { | 297 | { |
298 | struct crypto_cipher *cipher; | 298 | struct crypto_cipher *cipher; |
299 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 299 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
300 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 300 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
301 | struct priv *ctx = crypto_tfm_ctx(tfm); | 301 | struct priv *ctx = crypto_tfm_ctx(tfm); |
302 | u32 *flags = &tfm->crt_flags; | 302 | u32 *flags = &tfm->crt_flags; |
303 | 303 | ||
304 | cipher = crypto_spawn_cipher(spawn); | 304 | cipher = crypto_spawn_cipher(spawn); |
305 | if (IS_ERR(cipher)) | 305 | if (IS_ERR(cipher)) |
306 | return PTR_ERR(cipher); | 306 | return PTR_ERR(cipher); |
307 | 307 | ||
308 | if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) { | 308 | if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) { |
309 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | 309 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; |
310 | crypto_free_cipher(cipher); | 310 | crypto_free_cipher(cipher); |
311 | return -EINVAL; | 311 | return -EINVAL; |
312 | } | 312 | } |
313 | 313 | ||
314 | ctx->child = cipher; | 314 | ctx->child = cipher; |
315 | return 0; | 315 | return 0; |
316 | } | 316 | } |
317 | 317 | ||
318 | static void exit_tfm(struct crypto_tfm *tfm) | 318 | static void exit_tfm(struct crypto_tfm *tfm) |
319 | { | 319 | { |
320 | struct priv *ctx = crypto_tfm_ctx(tfm); | 320 | struct priv *ctx = crypto_tfm_ctx(tfm); |
321 | 321 | ||
322 | lrw_free_table(&ctx->table); | 322 | lrw_free_table(&ctx->table); |
323 | crypto_free_cipher(ctx->child); | 323 | crypto_free_cipher(ctx->child); |
324 | } | 324 | } |
325 | 325 | ||
326 | static struct crypto_instance *alloc(struct rtattr **tb) | 326 | static struct crypto_instance *alloc(struct rtattr **tb) |
327 | { | 327 | { |
328 | struct crypto_instance *inst; | 328 | struct crypto_instance *inst; |
329 | struct crypto_alg *alg; | 329 | struct crypto_alg *alg; |
330 | int err; | 330 | int err; |
331 | 331 | ||
332 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 332 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); |
333 | if (err) | 333 | if (err) |
334 | return ERR_PTR(err); | 334 | return ERR_PTR(err); |
335 | 335 | ||
336 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 336 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
337 | CRYPTO_ALG_TYPE_MASK); | 337 | CRYPTO_ALG_TYPE_MASK); |
338 | if (IS_ERR(alg)) | 338 | if (IS_ERR(alg)) |
339 | return ERR_CAST(alg); | 339 | return ERR_CAST(alg); |
340 | 340 | ||
341 | inst = crypto_alloc_instance("lrw", alg); | 341 | inst = crypto_alloc_instance("lrw", alg); |
342 | if (IS_ERR(inst)) | 342 | if (IS_ERR(inst)) |
343 | goto out_put_alg; | 343 | goto out_put_alg; |
344 | 344 | ||
345 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 345 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; |
346 | inst->alg.cra_priority = alg->cra_priority; | 346 | inst->alg.cra_priority = alg->cra_priority; |
347 | inst->alg.cra_blocksize = alg->cra_blocksize; | 347 | inst->alg.cra_blocksize = alg->cra_blocksize; |
348 | 348 | ||
349 | if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; | 349 | if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; |
350 | else inst->alg.cra_alignmask = alg->cra_alignmask; | 350 | else inst->alg.cra_alignmask = alg->cra_alignmask; |
351 | inst->alg.cra_type = &crypto_blkcipher_type; | 351 | inst->alg.cra_type = &crypto_blkcipher_type; |
352 | 352 | ||
353 | if (!(alg->cra_blocksize % 4)) | 353 | if (!(alg->cra_blocksize % 4)) |
354 | inst->alg.cra_alignmask |= 3; | 354 | inst->alg.cra_alignmask |= 3; |
355 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 355 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; |
356 | inst->alg.cra_blkcipher.min_keysize = | 356 | inst->alg.cra_blkcipher.min_keysize = |
357 | alg->cra_cipher.cia_min_keysize + alg->cra_blocksize; | 357 | alg->cra_cipher.cia_min_keysize + alg->cra_blocksize; |
358 | inst->alg.cra_blkcipher.max_keysize = | 358 | inst->alg.cra_blkcipher.max_keysize = |
359 | alg->cra_cipher.cia_max_keysize + alg->cra_blocksize; | 359 | alg->cra_cipher.cia_max_keysize + alg->cra_blocksize; |
360 | 360 | ||
361 | inst->alg.cra_ctxsize = sizeof(struct priv); | 361 | inst->alg.cra_ctxsize = sizeof(struct priv); |
362 | 362 | ||
363 | inst->alg.cra_init = init_tfm; | 363 | inst->alg.cra_init = init_tfm; |
364 | inst->alg.cra_exit = exit_tfm; | 364 | inst->alg.cra_exit = exit_tfm; |
365 | 365 | ||
366 | inst->alg.cra_blkcipher.setkey = setkey; | 366 | inst->alg.cra_blkcipher.setkey = setkey; |
367 | inst->alg.cra_blkcipher.encrypt = encrypt; | 367 | inst->alg.cra_blkcipher.encrypt = encrypt; |
368 | inst->alg.cra_blkcipher.decrypt = decrypt; | 368 | inst->alg.cra_blkcipher.decrypt = decrypt; |
369 | 369 | ||
370 | out_put_alg: | 370 | out_put_alg: |
371 | crypto_mod_put(alg); | 371 | crypto_mod_put(alg); |
372 | return inst; | 372 | return inst; |
373 | } | 373 | } |
374 | 374 | ||
/* Template free callback: drop the spawn and release the instance. */
static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
380 | 380 | ||
381 | static struct crypto_template crypto_tmpl = { | 381 | static struct crypto_template crypto_tmpl = { |
382 | .name = "lrw", | 382 | .name = "lrw", |
383 | .alloc = alloc, | 383 | .alloc = alloc, |
384 | .free = free, | 384 | .free = free, |
385 | .module = THIS_MODULE, | 385 | .module = THIS_MODULE, |
386 | }; | 386 | }; |
387 | 387 | ||
388 | static int __init crypto_module_init(void) | 388 | static int __init crypto_module_init(void) |
389 | { | 389 | { |
390 | return crypto_register_template(&crypto_tmpl); | 390 | return crypto_register_template(&crypto_tmpl); |
391 | } | 391 | } |
392 | 392 | ||
393 | static void __exit crypto_module_exit(void) | 393 | static void __exit crypto_module_exit(void) |
394 | { | 394 | { |
395 | crypto_unregister_template(&crypto_tmpl); | 395 | crypto_unregister_template(&crypto_tmpl); |
396 | } | 396 | } |
397 | 397 | ||
398 | module_init(crypto_module_init); | 398 | module_init(crypto_module_init); |
399 | module_exit(crypto_module_exit); | 399 | module_exit(crypto_module_exit); |
400 | 400 | ||
401 | MODULE_LICENSE("GPL"); | 401 | MODULE_LICENSE("GPL"); |
402 | MODULE_DESCRIPTION("LRW block cipher mode"); | 402 | MODULE_DESCRIPTION("LRW block cipher mode"); |
403 | MODULE_ALIAS_CRYPTO("lrw"); | ||
403 | 404 |
crypto/mcryptd.c
1 | /* | 1 | /* |
2 | * Software multibuffer async crypto daemon. | 2 | * Software multibuffer async crypto daemon. |
3 | * | 3 | * |
4 | * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com> | 4 | * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com> |
5 | * | 5 | * |
6 | * Adapted from crypto daemon. | 6 | * Adapted from crypto daemon. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the Free | 9 | * under the terms of the GNU General Public License as published by the Free |
10 | * Software Foundation; either version 2 of the License, or (at your option) | 10 | * Software Foundation; either version 2 of the License, or (at your option) |
11 | * any later version. | 11 | * any later version. |
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/algapi.h> | 15 | #include <crypto/algapi.h> |
16 | #include <crypto/internal/hash.h> | 16 | #include <crypto/internal/hash.h> |
17 | #include <crypto/internal/aead.h> | 17 | #include <crypto/internal/aead.h> |
18 | #include <crypto/mcryptd.h> | 18 | #include <crypto/mcryptd.h> |
19 | #include <crypto/crypto_wq.h> | 19 | #include <crypto/crypto_wq.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/hardirq.h> | 28 | #include <linux/hardirq.h> |
29 | 29 | ||
30 | #define MCRYPTD_MAX_CPU_QLEN 100 | 30 | #define MCRYPTD_MAX_CPU_QLEN 100 |
31 | #define MCRYPTD_BATCH 9 | 31 | #define MCRYPTD_BATCH 9 |
32 | 32 | ||
33 | static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, | 33 | static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, |
34 | unsigned int tail); | 34 | unsigned int tail); |
35 | 35 | ||
36 | struct mcryptd_flush_list { | 36 | struct mcryptd_flush_list { |
37 | struct list_head list; | 37 | struct list_head list; |
38 | struct mutex lock; | 38 | struct mutex lock; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | static struct mcryptd_flush_list __percpu *mcryptd_flist; | 41 | static struct mcryptd_flush_list __percpu *mcryptd_flist; |
42 | 42 | ||
43 | struct hashd_instance_ctx { | 43 | struct hashd_instance_ctx { |
44 | struct crypto_shash_spawn spawn; | 44 | struct crypto_shash_spawn spawn; |
45 | struct mcryptd_queue *queue; | 45 | struct mcryptd_queue *queue; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static void mcryptd_queue_worker(struct work_struct *work); | 48 | static void mcryptd_queue_worker(struct work_struct *work); |
49 | 49 | ||
50 | void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay) | 50 | void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay) |
51 | { | 51 | { |
52 | struct mcryptd_flush_list *flist; | 52 | struct mcryptd_flush_list *flist; |
53 | 53 | ||
54 | if (!cstate->flusher_engaged) { | 54 | if (!cstate->flusher_engaged) { |
55 | /* put the flusher on the flush list */ | 55 | /* put the flusher on the flush list */ |
56 | flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); | 56 | flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); |
57 | mutex_lock(&flist->lock); | 57 | mutex_lock(&flist->lock); |
58 | list_add_tail(&cstate->flush_list, &flist->list); | 58 | list_add_tail(&cstate->flush_list, &flist->list); |
59 | cstate->flusher_engaged = true; | 59 | cstate->flusher_engaged = true; |
60 | cstate->next_flush = jiffies + delay; | 60 | cstate->next_flush = jiffies + delay; |
61 | queue_delayed_work_on(smp_processor_id(), kcrypto_wq, | 61 | queue_delayed_work_on(smp_processor_id(), kcrypto_wq, |
62 | &cstate->flush, delay); | 62 | &cstate->flush, delay); |
63 | mutex_unlock(&flist->lock); | 63 | mutex_unlock(&flist->lock); |
64 | } | 64 | } |
65 | } | 65 | } |
66 | EXPORT_SYMBOL(mcryptd_arm_flusher); | 66 | EXPORT_SYMBOL(mcryptd_arm_flusher); |
67 | 67 | ||
68 | static int mcryptd_init_queue(struct mcryptd_queue *queue, | 68 | static int mcryptd_init_queue(struct mcryptd_queue *queue, |
69 | unsigned int max_cpu_qlen) | 69 | unsigned int max_cpu_qlen) |
70 | { | 70 | { |
71 | int cpu; | 71 | int cpu; |
72 | struct mcryptd_cpu_queue *cpu_queue; | 72 | struct mcryptd_cpu_queue *cpu_queue; |
73 | 73 | ||
74 | queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue); | 74 | queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue); |
75 | pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue); | 75 | pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue); |
76 | if (!queue->cpu_queue) | 76 | if (!queue->cpu_queue) |
77 | return -ENOMEM; | 77 | return -ENOMEM; |
78 | for_each_possible_cpu(cpu) { | 78 | for_each_possible_cpu(cpu) { |
79 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 79 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
80 | pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); | 80 | pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); |
81 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); | 81 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); |
82 | INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); | 82 | INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); |
83 | } | 83 | } |
84 | return 0; | 84 | return 0; |
85 | } | 85 | } |
86 | 86 | ||
87 | static void mcryptd_fini_queue(struct mcryptd_queue *queue) | 87 | static void mcryptd_fini_queue(struct mcryptd_queue *queue) |
88 | { | 88 | { |
89 | int cpu; | 89 | int cpu; |
90 | struct mcryptd_cpu_queue *cpu_queue; | 90 | struct mcryptd_cpu_queue *cpu_queue; |
91 | 91 | ||
92 | for_each_possible_cpu(cpu) { | 92 | for_each_possible_cpu(cpu) { |
93 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 93 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
94 | BUG_ON(cpu_queue->queue.qlen); | 94 | BUG_ON(cpu_queue->queue.qlen); |
95 | } | 95 | } |
96 | free_percpu(queue->cpu_queue); | 96 | free_percpu(queue->cpu_queue); |
97 | } | 97 | } |
98 | 98 | ||
99 | static int mcryptd_enqueue_request(struct mcryptd_queue *queue, | 99 | static int mcryptd_enqueue_request(struct mcryptd_queue *queue, |
100 | struct crypto_async_request *request, | 100 | struct crypto_async_request *request, |
101 | struct mcryptd_hash_request_ctx *rctx) | 101 | struct mcryptd_hash_request_ctx *rctx) |
102 | { | 102 | { |
103 | int cpu, err; | 103 | int cpu, err; |
104 | struct mcryptd_cpu_queue *cpu_queue; | 104 | struct mcryptd_cpu_queue *cpu_queue; |
105 | 105 | ||
106 | cpu = get_cpu(); | 106 | cpu = get_cpu(); |
107 | cpu_queue = this_cpu_ptr(queue->cpu_queue); | 107 | cpu_queue = this_cpu_ptr(queue->cpu_queue); |
108 | rctx->tag.cpu = cpu; | 108 | rctx->tag.cpu = cpu; |
109 | 109 | ||
110 | err = crypto_enqueue_request(&cpu_queue->queue, request); | 110 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
111 | pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", | 111 | pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", |
112 | cpu, cpu_queue, request); | 112 | cpu, cpu_queue, request); |
113 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | 113 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
114 | put_cpu(); | 114 | put_cpu(); |
115 | 115 | ||
116 | return err; | 116 | return err; |
117 | } | 117 | } |
118 | 118 | ||
119 | /* | 119 | /* |
120 | * Try to opportunistically flush the partially completed jobs if | 120 | * Try to opportunistically flush the partially completed jobs if |
121 | * crypto daemon is the only task running. | 121 | * crypto daemon is the only task running. |
122 | */ | 122 | */ |
123 | static void mcryptd_opportunistic_flush(void) | 123 | static void mcryptd_opportunistic_flush(void) |
124 | { | 124 | { |
125 | struct mcryptd_flush_list *flist; | 125 | struct mcryptd_flush_list *flist; |
126 | struct mcryptd_alg_cstate *cstate; | 126 | struct mcryptd_alg_cstate *cstate; |
127 | 127 | ||
128 | flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); | 128 | flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); |
129 | while (single_task_running()) { | 129 | while (single_task_running()) { |
130 | mutex_lock(&flist->lock); | 130 | mutex_lock(&flist->lock); |
131 | if (list_empty(&flist->list)) { | 131 | if (list_empty(&flist->list)) { |
132 | mutex_unlock(&flist->lock); | 132 | mutex_unlock(&flist->lock); |
133 | return; | 133 | return; |
134 | } | 134 | } |
135 | cstate = list_entry(flist->list.next, | 135 | cstate = list_entry(flist->list.next, |
136 | struct mcryptd_alg_cstate, flush_list); | 136 | struct mcryptd_alg_cstate, flush_list); |
137 | if (!cstate->flusher_engaged) { | 137 | if (!cstate->flusher_engaged) { |
138 | mutex_unlock(&flist->lock); | 138 | mutex_unlock(&flist->lock); |
139 | return; | 139 | return; |
140 | } | 140 | } |
141 | list_del(&cstate->flush_list); | 141 | list_del(&cstate->flush_list); |
142 | cstate->flusher_engaged = false; | 142 | cstate->flusher_engaged = false; |
143 | mutex_unlock(&flist->lock); | 143 | mutex_unlock(&flist->lock); |
144 | cstate->alg_state->flusher(cstate); | 144 | cstate->alg_state->flusher(cstate); |
145 | } | 145 | } |
146 | } | 146 | } |
147 | 147 | ||
148 | /* | 148 | /* |
149 | * Called in workqueue context, do one real encryption work (via | 149 | * Called in workqueue context, do one real encryption work (via |
150 | * req->complete) and reschedule itself if there is more work to | 150 | * req->complete) and reschedule itself if there is more work to |
151 | * do. | 151 | * do. |
152 | */ | 152 | */ |
153 | static void mcryptd_queue_worker(struct work_struct *work) | 153 | static void mcryptd_queue_worker(struct work_struct *work) |
154 | { | 154 | { |
155 | struct mcryptd_cpu_queue *cpu_queue; | 155 | struct mcryptd_cpu_queue *cpu_queue; |
156 | struct crypto_async_request *req, *backlog; | 156 | struct crypto_async_request *req, *backlog; |
157 | int i; | 157 | int i; |
158 | 158 | ||
159 | /* | 159 | /* |
160 | * Need to loop through more than once for multi-buffer to | 160 | * Need to loop through more than once for multi-buffer to |
161 | * be effective. | 161 | * be effective. |
162 | */ | 162 | */ |
163 | 163 | ||
164 | cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); | 164 | cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); |
165 | i = 0; | 165 | i = 0; |
166 | while (i < MCRYPTD_BATCH || single_task_running()) { | 166 | while (i < MCRYPTD_BATCH || single_task_running()) { |
167 | /* | 167 | /* |
168 | * preempt_disable/enable is used to prevent | 168 | * preempt_disable/enable is used to prevent |
169 | * being preempted by mcryptd_enqueue_request() | 169 | * being preempted by mcryptd_enqueue_request() |
170 | */ | 170 | */ |
171 | local_bh_disable(); | 171 | local_bh_disable(); |
172 | preempt_disable(); | 172 | preempt_disable(); |
173 | backlog = crypto_get_backlog(&cpu_queue->queue); | 173 | backlog = crypto_get_backlog(&cpu_queue->queue); |
174 | req = crypto_dequeue_request(&cpu_queue->queue); | 174 | req = crypto_dequeue_request(&cpu_queue->queue); |
175 | preempt_enable(); | 175 | preempt_enable(); |
176 | local_bh_enable(); | 176 | local_bh_enable(); |
177 | 177 | ||
178 | if (!req) { | 178 | if (!req) { |
179 | mcryptd_opportunistic_flush(); | 179 | mcryptd_opportunistic_flush(); |
180 | return; | 180 | return; |
181 | } | 181 | } |
182 | 182 | ||
183 | if (backlog) | 183 | if (backlog) |
184 | backlog->complete(backlog, -EINPROGRESS); | 184 | backlog->complete(backlog, -EINPROGRESS); |
185 | req->complete(req, 0); | 185 | req->complete(req, 0); |
186 | if (!cpu_queue->queue.qlen) | 186 | if (!cpu_queue->queue.qlen) |
187 | return; | 187 | return; |
188 | ++i; | 188 | ++i; |
189 | } | 189 | } |
190 | if (cpu_queue->queue.qlen) | 190 | if (cpu_queue->queue.qlen) |
191 | queue_work(kcrypto_wq, &cpu_queue->work); | 191 | queue_work(kcrypto_wq, &cpu_queue->work); |
192 | } | 192 | } |
193 | 193 | ||
194 | void mcryptd_flusher(struct work_struct *__work) | 194 | void mcryptd_flusher(struct work_struct *__work) |
195 | { | 195 | { |
196 | struct mcryptd_alg_cstate *alg_cpu_state; | 196 | struct mcryptd_alg_cstate *alg_cpu_state; |
197 | struct mcryptd_alg_state *alg_state; | 197 | struct mcryptd_alg_state *alg_state; |
198 | struct mcryptd_flush_list *flist; | 198 | struct mcryptd_flush_list *flist; |
199 | int cpu; | 199 | int cpu; |
200 | 200 | ||
201 | cpu = smp_processor_id(); | 201 | cpu = smp_processor_id(); |
202 | alg_cpu_state = container_of(to_delayed_work(__work), | 202 | alg_cpu_state = container_of(to_delayed_work(__work), |
203 | struct mcryptd_alg_cstate, flush); | 203 | struct mcryptd_alg_cstate, flush); |
204 | alg_state = alg_cpu_state->alg_state; | 204 | alg_state = alg_cpu_state->alg_state; |
205 | if (alg_cpu_state->cpu != cpu) | 205 | if (alg_cpu_state->cpu != cpu) |
206 | pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n", | 206 | pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n", |
207 | cpu, alg_cpu_state->cpu); | 207 | cpu, alg_cpu_state->cpu); |
208 | 208 | ||
209 | if (alg_cpu_state->flusher_engaged) { | 209 | if (alg_cpu_state->flusher_engaged) { |
210 | flist = per_cpu_ptr(mcryptd_flist, cpu); | 210 | flist = per_cpu_ptr(mcryptd_flist, cpu); |
211 | mutex_lock(&flist->lock); | 211 | mutex_lock(&flist->lock); |
212 | list_del(&alg_cpu_state->flush_list); | 212 | list_del(&alg_cpu_state->flush_list); |
213 | alg_cpu_state->flusher_engaged = false; | 213 | alg_cpu_state->flusher_engaged = false; |
214 | mutex_unlock(&flist->lock); | 214 | mutex_unlock(&flist->lock); |
215 | alg_state->flusher(alg_cpu_state); | 215 | alg_state->flusher(alg_cpu_state); |
216 | } | 216 | } |
217 | } | 217 | } |
218 | EXPORT_SYMBOL_GPL(mcryptd_flusher); | 218 | EXPORT_SYMBOL_GPL(mcryptd_flusher); |
219 | 219 | ||
220 | static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm) | 220 | static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm) |
221 | { | 221 | { |
222 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 222 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
223 | struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 223 | struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
224 | 224 | ||
225 | return ictx->queue; | 225 | return ictx->queue; |
226 | } | 226 | } |
227 | 227 | ||
228 | static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, | 228 | static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, |
229 | unsigned int tail) | 229 | unsigned int tail) |
230 | { | 230 | { |
231 | char *p; | 231 | char *p; |
232 | struct crypto_instance *inst; | 232 | struct crypto_instance *inst; |
233 | int err; | 233 | int err; |
234 | 234 | ||
235 | p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); | 235 | p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); |
236 | if (!p) | 236 | if (!p) |
237 | return ERR_PTR(-ENOMEM); | 237 | return ERR_PTR(-ENOMEM); |
238 | 238 | ||
239 | inst = (void *)(p + head); | 239 | inst = (void *)(p + head); |
240 | 240 | ||
241 | err = -ENAMETOOLONG; | 241 | err = -ENAMETOOLONG; |
242 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 242 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
243 | "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 243 | "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
244 | goto out_free_inst; | 244 | goto out_free_inst; |
245 | 245 | ||
246 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 246 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
247 | 247 | ||
248 | inst->alg.cra_priority = alg->cra_priority + 50; | 248 | inst->alg.cra_priority = alg->cra_priority + 50; |
249 | inst->alg.cra_blocksize = alg->cra_blocksize; | 249 | inst->alg.cra_blocksize = alg->cra_blocksize; |
250 | inst->alg.cra_alignmask = alg->cra_alignmask; | 250 | inst->alg.cra_alignmask = alg->cra_alignmask; |
251 | 251 | ||
252 | out: | 252 | out: |
253 | return p; | 253 | return p; |
254 | 254 | ||
255 | out_free_inst: | 255 | out_free_inst: |
256 | kfree(p); | 256 | kfree(p); |
257 | p = ERR_PTR(err); | 257 | p = ERR_PTR(err); |
258 | goto out; | 258 | goto out; |
259 | } | 259 | } |
260 | 260 | ||
261 | static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm) | 261 | static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm) |
262 | { | 262 | { |
263 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 263 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
264 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); | 264 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); |
265 | struct crypto_shash_spawn *spawn = &ictx->spawn; | 265 | struct crypto_shash_spawn *spawn = &ictx->spawn; |
266 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 266 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
267 | struct crypto_shash *hash; | 267 | struct crypto_shash *hash; |
268 | 268 | ||
269 | hash = crypto_spawn_shash(spawn); | 269 | hash = crypto_spawn_shash(spawn); |
270 | if (IS_ERR(hash)) | 270 | if (IS_ERR(hash)) |
271 | return PTR_ERR(hash); | 271 | return PTR_ERR(hash); |
272 | 272 | ||
273 | ctx->child = hash; | 273 | ctx->child = hash; |
274 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 274 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
275 | sizeof(struct mcryptd_hash_request_ctx) + | 275 | sizeof(struct mcryptd_hash_request_ctx) + |
276 | crypto_shash_descsize(hash)); | 276 | crypto_shash_descsize(hash)); |
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm) | 280 | static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm) |
281 | { | 281 | { |
282 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 282 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
283 | 283 | ||
284 | crypto_free_shash(ctx->child); | 284 | crypto_free_shash(ctx->child); |
285 | } | 285 | } |
286 | 286 | ||
287 | static int mcryptd_hash_setkey(struct crypto_ahash *parent, | 287 | static int mcryptd_hash_setkey(struct crypto_ahash *parent, |
288 | const u8 *key, unsigned int keylen) | 288 | const u8 *key, unsigned int keylen) |
289 | { | 289 | { |
290 | struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | 290 | struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); |
291 | struct crypto_shash *child = ctx->child; | 291 | struct crypto_shash *child = ctx->child; |
292 | int err; | 292 | int err; |
293 | 293 | ||
294 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 294 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
295 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & | 295 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & |
296 | CRYPTO_TFM_REQ_MASK); | 296 | CRYPTO_TFM_REQ_MASK); |
297 | err = crypto_shash_setkey(child, key, keylen); | 297 | err = crypto_shash_setkey(child, key, keylen); |
298 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & | 298 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & |
299 | CRYPTO_TFM_RES_MASK); | 299 | CRYPTO_TFM_RES_MASK); |
300 | return err; | 300 | return err; |
301 | } | 301 | } |
302 | 302 | ||
303 | static int mcryptd_hash_enqueue(struct ahash_request *req, | 303 | static int mcryptd_hash_enqueue(struct ahash_request *req, |
304 | crypto_completion_t complete) | 304 | crypto_completion_t complete) |
305 | { | 305 | { |
306 | int ret; | 306 | int ret; |
307 | 307 | ||
308 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 308 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
309 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 309 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
310 | struct mcryptd_queue *queue = | 310 | struct mcryptd_queue *queue = |
311 | mcryptd_get_queue(crypto_ahash_tfm(tfm)); | 311 | mcryptd_get_queue(crypto_ahash_tfm(tfm)); |
312 | 312 | ||
313 | rctx->complete = req->base.complete; | 313 | rctx->complete = req->base.complete; |
314 | req->base.complete = complete; | 314 | req->base.complete = complete; |
315 | 315 | ||
316 | ret = mcryptd_enqueue_request(queue, &req->base, rctx); | 316 | ret = mcryptd_enqueue_request(queue, &req->base, rctx); |
317 | 317 | ||
318 | return ret; | 318 | return ret; |
319 | } | 319 | } |
320 | 320 | ||
321 | static void mcryptd_hash_init(struct crypto_async_request *req_async, int err) | 321 | static void mcryptd_hash_init(struct crypto_async_request *req_async, int err) |
322 | { | 322 | { |
323 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 323 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
324 | struct crypto_shash *child = ctx->child; | 324 | struct crypto_shash *child = ctx->child; |
325 | struct ahash_request *req = ahash_request_cast(req_async); | 325 | struct ahash_request *req = ahash_request_cast(req_async); |
326 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 326 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
327 | struct shash_desc *desc = &rctx->desc; | 327 | struct shash_desc *desc = &rctx->desc; |
328 | 328 | ||
329 | if (unlikely(err == -EINPROGRESS)) | 329 | if (unlikely(err == -EINPROGRESS)) |
330 | goto out; | 330 | goto out; |
331 | 331 | ||
332 | desc->tfm = child; | 332 | desc->tfm = child; |
333 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 333 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
334 | 334 | ||
335 | err = crypto_shash_init(desc); | 335 | err = crypto_shash_init(desc); |
336 | 336 | ||
337 | req->base.complete = rctx->complete; | 337 | req->base.complete = rctx->complete; |
338 | 338 | ||
339 | out: | 339 | out: |
340 | local_bh_disable(); | 340 | local_bh_disable(); |
341 | rctx->complete(&req->base, err); | 341 | rctx->complete(&req->base, err); |
342 | local_bh_enable(); | 342 | local_bh_enable(); |
343 | } | 343 | } |
344 | 344 | ||
345 | static int mcryptd_hash_init_enqueue(struct ahash_request *req) | 345 | static int mcryptd_hash_init_enqueue(struct ahash_request *req) |
346 | { | 346 | { |
347 | return mcryptd_hash_enqueue(req, mcryptd_hash_init); | 347 | return mcryptd_hash_enqueue(req, mcryptd_hash_init); |
348 | } | 348 | } |
349 | 349 | ||
350 | static void mcryptd_hash_update(struct crypto_async_request *req_async, int err) | 350 | static void mcryptd_hash_update(struct crypto_async_request *req_async, int err) |
351 | { | 351 | { |
352 | struct ahash_request *req = ahash_request_cast(req_async); | 352 | struct ahash_request *req = ahash_request_cast(req_async); |
353 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 353 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
354 | 354 | ||
355 | if (unlikely(err == -EINPROGRESS)) | 355 | if (unlikely(err == -EINPROGRESS)) |
356 | goto out; | 356 | goto out; |
357 | 357 | ||
358 | err = shash_ahash_mcryptd_update(req, &rctx->desc); | 358 | err = shash_ahash_mcryptd_update(req, &rctx->desc); |
359 | if (err) { | 359 | if (err) { |
360 | req->base.complete = rctx->complete; | 360 | req->base.complete = rctx->complete; |
361 | goto out; | 361 | goto out; |
362 | } | 362 | } |
363 | 363 | ||
364 | return; | 364 | return; |
365 | out: | 365 | out: |
366 | local_bh_disable(); | 366 | local_bh_disable(); |
367 | rctx->complete(&req->base, err); | 367 | rctx->complete(&req->base, err); |
368 | local_bh_enable(); | 368 | local_bh_enable(); |
369 | } | 369 | } |
370 | 370 | ||
371 | static int mcryptd_hash_update_enqueue(struct ahash_request *req) | 371 | static int mcryptd_hash_update_enqueue(struct ahash_request *req) |
372 | { | 372 | { |
373 | return mcryptd_hash_enqueue(req, mcryptd_hash_update); | 373 | return mcryptd_hash_enqueue(req, mcryptd_hash_update); |
374 | } | 374 | } |
375 | 375 | ||
376 | static void mcryptd_hash_final(struct crypto_async_request *req_async, int err) | 376 | static void mcryptd_hash_final(struct crypto_async_request *req_async, int err) |
377 | { | 377 | { |
378 | struct ahash_request *req = ahash_request_cast(req_async); | 378 | struct ahash_request *req = ahash_request_cast(req_async); |
379 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 379 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
380 | 380 | ||
381 | if (unlikely(err == -EINPROGRESS)) | 381 | if (unlikely(err == -EINPROGRESS)) |
382 | goto out; | 382 | goto out; |
383 | 383 | ||
384 | err = shash_ahash_mcryptd_final(req, &rctx->desc); | 384 | err = shash_ahash_mcryptd_final(req, &rctx->desc); |
385 | if (err) { | 385 | if (err) { |
386 | req->base.complete = rctx->complete; | 386 | req->base.complete = rctx->complete; |
387 | goto out; | 387 | goto out; |
388 | } | 388 | } |
389 | 389 | ||
390 | return; | 390 | return; |
391 | out: | 391 | out: |
392 | local_bh_disable(); | 392 | local_bh_disable(); |
393 | rctx->complete(&req->base, err); | 393 | rctx->complete(&req->base, err); |
394 | local_bh_enable(); | 394 | local_bh_enable(); |
395 | } | 395 | } |
396 | 396 | ||
397 | static int mcryptd_hash_final_enqueue(struct ahash_request *req) | 397 | static int mcryptd_hash_final_enqueue(struct ahash_request *req) |
398 | { | 398 | { |
399 | return mcryptd_hash_enqueue(req, mcryptd_hash_final); | 399 | return mcryptd_hash_enqueue(req, mcryptd_hash_final); |
400 | } | 400 | } |
401 | 401 | ||
402 | static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err) | 402 | static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err) |
403 | { | 403 | { |
404 | struct ahash_request *req = ahash_request_cast(req_async); | 404 | struct ahash_request *req = ahash_request_cast(req_async); |
405 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 405 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
406 | 406 | ||
407 | if (unlikely(err == -EINPROGRESS)) | 407 | if (unlikely(err == -EINPROGRESS)) |
408 | goto out; | 408 | goto out; |
409 | 409 | ||
410 | err = shash_ahash_mcryptd_finup(req, &rctx->desc); | 410 | err = shash_ahash_mcryptd_finup(req, &rctx->desc); |
411 | 411 | ||
412 | if (err) { | 412 | if (err) { |
413 | req->base.complete = rctx->complete; | 413 | req->base.complete = rctx->complete; |
414 | goto out; | 414 | goto out; |
415 | } | 415 | } |
416 | 416 | ||
417 | return; | 417 | return; |
418 | out: | 418 | out: |
419 | local_bh_disable(); | 419 | local_bh_disable(); |
420 | rctx->complete(&req->base, err); | 420 | rctx->complete(&req->base, err); |
421 | local_bh_enable(); | 421 | local_bh_enable(); |
422 | } | 422 | } |
423 | 423 | ||
424 | static int mcryptd_hash_finup_enqueue(struct ahash_request *req) | 424 | static int mcryptd_hash_finup_enqueue(struct ahash_request *req) |
425 | { | 425 | { |
426 | return mcryptd_hash_enqueue(req, mcryptd_hash_finup); | 426 | return mcryptd_hash_enqueue(req, mcryptd_hash_finup); |
427 | } | 427 | } |
428 | 428 | ||
429 | static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err) | 429 | static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err) |
430 | { | 430 | { |
431 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 431 | struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
432 | struct crypto_shash *child = ctx->child; | 432 | struct crypto_shash *child = ctx->child; |
433 | struct ahash_request *req = ahash_request_cast(req_async); | 433 | struct ahash_request *req = ahash_request_cast(req_async); |
434 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 434 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
435 | struct shash_desc *desc = &rctx->desc; | 435 | struct shash_desc *desc = &rctx->desc; |
436 | 436 | ||
437 | if (unlikely(err == -EINPROGRESS)) | 437 | if (unlikely(err == -EINPROGRESS)) |
438 | goto out; | 438 | goto out; |
439 | 439 | ||
440 | desc->tfm = child; | 440 | desc->tfm = child; |
441 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */ | 441 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */ |
442 | 442 | ||
443 | err = shash_ahash_mcryptd_digest(req, desc); | 443 | err = shash_ahash_mcryptd_digest(req, desc); |
444 | 444 | ||
445 | if (err) { | 445 | if (err) { |
446 | req->base.complete = rctx->complete; | 446 | req->base.complete = rctx->complete; |
447 | goto out; | 447 | goto out; |
448 | } | 448 | } |
449 | 449 | ||
450 | return; | 450 | return; |
451 | out: | 451 | out: |
452 | local_bh_disable(); | 452 | local_bh_disable(); |
453 | rctx->complete(&req->base, err); | 453 | rctx->complete(&req->base, err); |
454 | local_bh_enable(); | 454 | local_bh_enable(); |
455 | } | 455 | } |
456 | 456 | ||
457 | static int mcryptd_hash_digest_enqueue(struct ahash_request *req) | 457 | static int mcryptd_hash_digest_enqueue(struct ahash_request *req) |
458 | { | 458 | { |
459 | return mcryptd_hash_enqueue(req, mcryptd_hash_digest); | 459 | return mcryptd_hash_enqueue(req, mcryptd_hash_digest); |
460 | } | 460 | } |
461 | 461 | ||
462 | static int mcryptd_hash_export(struct ahash_request *req, void *out) | 462 | static int mcryptd_hash_export(struct ahash_request *req, void *out) |
463 | { | 463 | { |
464 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 464 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
465 | 465 | ||
466 | return crypto_shash_export(&rctx->desc, out); | 466 | return crypto_shash_export(&rctx->desc, out); |
467 | } | 467 | } |
468 | 468 | ||
469 | static int mcryptd_hash_import(struct ahash_request *req, const void *in) | 469 | static int mcryptd_hash_import(struct ahash_request *req, const void *in) |
470 | { | 470 | { |
471 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 471 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
472 | 472 | ||
473 | return crypto_shash_import(&rctx->desc, in); | 473 | return crypto_shash_import(&rctx->desc, in); |
474 | } | 474 | } |
475 | 475 | ||
476 | static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | 476 | static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
477 | struct mcryptd_queue *queue) | 477 | struct mcryptd_queue *queue) |
478 | { | 478 | { |
479 | struct hashd_instance_ctx *ctx; | 479 | struct hashd_instance_ctx *ctx; |
480 | struct ahash_instance *inst; | 480 | struct ahash_instance *inst; |
481 | struct shash_alg *salg; | 481 | struct shash_alg *salg; |
482 | struct crypto_alg *alg; | 482 | struct crypto_alg *alg; |
483 | int err; | 483 | int err; |
484 | 484 | ||
485 | salg = shash_attr_alg(tb[1], 0, 0); | 485 | salg = shash_attr_alg(tb[1], 0, 0); |
486 | if (IS_ERR(salg)) | 486 | if (IS_ERR(salg)) |
487 | return PTR_ERR(salg); | 487 | return PTR_ERR(salg); |
488 | 488 | ||
489 | alg = &salg->base; | 489 | alg = &salg->base; |
490 | pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name); | 490 | pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name); |
491 | inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(), | 491 | inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(), |
492 | sizeof(*ctx)); | 492 | sizeof(*ctx)); |
493 | err = PTR_ERR(inst); | 493 | err = PTR_ERR(inst); |
494 | if (IS_ERR(inst)) | 494 | if (IS_ERR(inst)) |
495 | goto out_put_alg; | 495 | goto out_put_alg; |
496 | 496 | ||
497 | ctx = ahash_instance_ctx(inst); | 497 | ctx = ahash_instance_ctx(inst); |
498 | ctx->queue = queue; | 498 | ctx->queue = queue; |
499 | 499 | ||
500 | err = crypto_init_shash_spawn(&ctx->spawn, salg, | 500 | err = crypto_init_shash_spawn(&ctx->spawn, salg, |
501 | ahash_crypto_instance(inst)); | 501 | ahash_crypto_instance(inst)); |
502 | if (err) | 502 | if (err) |
503 | goto out_free_inst; | 503 | goto out_free_inst; |
504 | 504 | ||
505 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; | 505 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; |
506 | 506 | ||
507 | inst->alg.halg.digestsize = salg->digestsize; | 507 | inst->alg.halg.digestsize = salg->digestsize; |
508 | inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); | 508 | inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); |
509 | 509 | ||
510 | inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; | 510 | inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; |
511 | inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm; | 511 | inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm; |
512 | 512 | ||
513 | inst->alg.init = mcryptd_hash_init_enqueue; | 513 | inst->alg.init = mcryptd_hash_init_enqueue; |
514 | inst->alg.update = mcryptd_hash_update_enqueue; | 514 | inst->alg.update = mcryptd_hash_update_enqueue; |
515 | inst->alg.final = mcryptd_hash_final_enqueue; | 515 | inst->alg.final = mcryptd_hash_final_enqueue; |
516 | inst->alg.finup = mcryptd_hash_finup_enqueue; | 516 | inst->alg.finup = mcryptd_hash_finup_enqueue; |
517 | inst->alg.export = mcryptd_hash_export; | 517 | inst->alg.export = mcryptd_hash_export; |
518 | inst->alg.import = mcryptd_hash_import; | 518 | inst->alg.import = mcryptd_hash_import; |
519 | inst->alg.setkey = mcryptd_hash_setkey; | 519 | inst->alg.setkey = mcryptd_hash_setkey; |
520 | inst->alg.digest = mcryptd_hash_digest_enqueue; | 520 | inst->alg.digest = mcryptd_hash_digest_enqueue; |
521 | 521 | ||
522 | err = ahash_register_instance(tmpl, inst); | 522 | err = ahash_register_instance(tmpl, inst); |
523 | if (err) { | 523 | if (err) { |
524 | crypto_drop_shash(&ctx->spawn); | 524 | crypto_drop_shash(&ctx->spawn); |
525 | out_free_inst: | 525 | out_free_inst: |
526 | kfree(inst); | 526 | kfree(inst); |
527 | } | 527 | } |
528 | 528 | ||
529 | out_put_alg: | 529 | out_put_alg: |
530 | crypto_mod_put(alg); | 530 | crypto_mod_put(alg); |
531 | return err; | 531 | return err; |
532 | } | 532 | } |
533 | 533 | ||
534 | static struct mcryptd_queue mqueue; | 534 | static struct mcryptd_queue mqueue; |
535 | 535 | ||
536 | static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb) | 536 | static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb) |
537 | { | 537 | { |
538 | struct crypto_attr_type *algt; | 538 | struct crypto_attr_type *algt; |
539 | 539 | ||
540 | algt = crypto_get_attr_type(tb); | 540 | algt = crypto_get_attr_type(tb); |
541 | if (IS_ERR(algt)) | 541 | if (IS_ERR(algt)) |
542 | return PTR_ERR(algt); | 542 | return PTR_ERR(algt); |
543 | 543 | ||
544 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 544 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
545 | case CRYPTO_ALG_TYPE_DIGEST: | 545 | case CRYPTO_ALG_TYPE_DIGEST: |
546 | return mcryptd_create_hash(tmpl, tb, &mqueue); | 546 | return mcryptd_create_hash(tmpl, tb, &mqueue); |
547 | break; | 547 | break; |
548 | } | 548 | } |
549 | 549 | ||
550 | return -EINVAL; | 550 | return -EINVAL; |
551 | } | 551 | } |
552 | 552 | ||
553 | static void mcryptd_free(struct crypto_instance *inst) | 553 | static void mcryptd_free(struct crypto_instance *inst) |
554 | { | 554 | { |
555 | struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | 555 | struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst); |
556 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); | 556 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); |
557 | 557 | ||
558 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { | 558 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { |
559 | case CRYPTO_ALG_TYPE_AHASH: | 559 | case CRYPTO_ALG_TYPE_AHASH: |
560 | crypto_drop_shash(&hctx->spawn); | 560 | crypto_drop_shash(&hctx->spawn); |
561 | kfree(ahash_instance(inst)); | 561 | kfree(ahash_instance(inst)); |
562 | return; | 562 | return; |
563 | default: | 563 | default: |
564 | crypto_drop_spawn(&ctx->spawn); | 564 | crypto_drop_spawn(&ctx->spawn); |
565 | kfree(inst); | 565 | kfree(inst); |
566 | } | 566 | } |
567 | } | 567 | } |
568 | 568 | ||
569 | static struct crypto_template mcryptd_tmpl = { | 569 | static struct crypto_template mcryptd_tmpl = { |
570 | .name = "mcryptd", | 570 | .name = "mcryptd", |
571 | .create = mcryptd_create, | 571 | .create = mcryptd_create, |
572 | .free = mcryptd_free, | 572 | .free = mcryptd_free, |
573 | .module = THIS_MODULE, | 573 | .module = THIS_MODULE, |
574 | }; | 574 | }; |
575 | 575 | ||
576 | struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, | 576 | struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, |
577 | u32 type, u32 mask) | 577 | u32 type, u32 mask) |
578 | { | 578 | { |
579 | char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | 579 | char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME]; |
580 | struct crypto_ahash *tfm; | 580 | struct crypto_ahash *tfm; |
581 | 581 | ||
582 | if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME, | 582 | if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME, |
583 | "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | 583 | "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) |
584 | return ERR_PTR(-EINVAL); | 584 | return ERR_PTR(-EINVAL); |
585 | tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask); | 585 | tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask); |
586 | if (IS_ERR(tfm)) | 586 | if (IS_ERR(tfm)) |
587 | return ERR_CAST(tfm); | 587 | return ERR_CAST(tfm); |
588 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | 588 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { |
589 | crypto_free_ahash(tfm); | 589 | crypto_free_ahash(tfm); |
590 | return ERR_PTR(-EINVAL); | 590 | return ERR_PTR(-EINVAL); |
591 | } | 591 | } |
592 | 592 | ||
593 | return __mcryptd_ahash_cast(tfm); | 593 | return __mcryptd_ahash_cast(tfm); |
594 | } | 594 | } |
595 | EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); | 595 | EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); |
596 | 596 | ||
597 | int shash_ahash_mcryptd_digest(struct ahash_request *req, | 597 | int shash_ahash_mcryptd_digest(struct ahash_request *req, |
598 | struct shash_desc *desc) | 598 | struct shash_desc *desc) |
599 | { | 599 | { |
600 | int err; | 600 | int err; |
601 | 601 | ||
602 | err = crypto_shash_init(desc) ?: | 602 | err = crypto_shash_init(desc) ?: |
603 | shash_ahash_mcryptd_finup(req, desc); | 603 | shash_ahash_mcryptd_finup(req, desc); |
604 | 604 | ||
605 | return err; | 605 | return err; |
606 | } | 606 | } |
607 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest); | 607 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest); |
608 | 608 | ||
609 | int shash_ahash_mcryptd_update(struct ahash_request *req, | 609 | int shash_ahash_mcryptd_update(struct ahash_request *req, |
610 | struct shash_desc *desc) | 610 | struct shash_desc *desc) |
611 | { | 611 | { |
612 | struct crypto_shash *tfm = desc->tfm; | 612 | struct crypto_shash *tfm = desc->tfm; |
613 | struct shash_alg *shash = crypto_shash_alg(tfm); | 613 | struct shash_alg *shash = crypto_shash_alg(tfm); |
614 | 614 | ||
615 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ | 615 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ |
616 | 616 | ||
617 | return shash->update(desc, NULL, 0); | 617 | return shash->update(desc, NULL, 0); |
618 | } | 618 | } |
619 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update); | 619 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update); |
620 | 620 | ||
621 | int shash_ahash_mcryptd_finup(struct ahash_request *req, | 621 | int shash_ahash_mcryptd_finup(struct ahash_request *req, |
622 | struct shash_desc *desc) | 622 | struct shash_desc *desc) |
623 | { | 623 | { |
624 | struct crypto_shash *tfm = desc->tfm; | 624 | struct crypto_shash *tfm = desc->tfm; |
625 | struct shash_alg *shash = crypto_shash_alg(tfm); | 625 | struct shash_alg *shash = crypto_shash_alg(tfm); |
626 | 626 | ||
627 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ | 627 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ |
628 | 628 | ||
629 | return shash->finup(desc, NULL, 0, req->result); | 629 | return shash->finup(desc, NULL, 0, req->result); |
630 | } | 630 | } |
631 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup); | 631 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup); |
632 | 632 | ||
633 | int shash_ahash_mcryptd_final(struct ahash_request *req, | 633 | int shash_ahash_mcryptd_final(struct ahash_request *req, |
634 | struct shash_desc *desc) | 634 | struct shash_desc *desc) |
635 | { | 635 | { |
636 | struct crypto_shash *tfm = desc->tfm; | 636 | struct crypto_shash *tfm = desc->tfm; |
637 | struct shash_alg *shash = crypto_shash_alg(tfm); | 637 | struct shash_alg *shash = crypto_shash_alg(tfm); |
638 | 638 | ||
639 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ | 639 | /* alignment is to be done by multi-buffer crypto algorithm if needed */ |
640 | 640 | ||
641 | return shash->final(desc, req->result); | 641 | return shash->final(desc, req->result); |
642 | } | 642 | } |
643 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final); | 643 | EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final); |
644 | 644 | ||
645 | struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) | 645 | struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) |
646 | { | 646 | { |
647 | struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); | 647 | struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); |
648 | 648 | ||
649 | return ctx->child; | 649 | return ctx->child; |
650 | } | 650 | } |
651 | EXPORT_SYMBOL_GPL(mcryptd_ahash_child); | 651 | EXPORT_SYMBOL_GPL(mcryptd_ahash_child); |
652 | 652 | ||
653 | struct shash_desc *mcryptd_shash_desc(struct ahash_request *req) | 653 | struct shash_desc *mcryptd_shash_desc(struct ahash_request *req) |
654 | { | 654 | { |
655 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 655 | struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
656 | return &rctx->desc; | 656 | return &rctx->desc; |
657 | } | 657 | } |
658 | EXPORT_SYMBOL_GPL(mcryptd_shash_desc); | 658 | EXPORT_SYMBOL_GPL(mcryptd_shash_desc); |
659 | 659 | ||
660 | void mcryptd_free_ahash(struct mcryptd_ahash *tfm) | 660 | void mcryptd_free_ahash(struct mcryptd_ahash *tfm) |
661 | { | 661 | { |
662 | crypto_free_ahash(&tfm->base); | 662 | crypto_free_ahash(&tfm->base); |
663 | } | 663 | } |
664 | EXPORT_SYMBOL_GPL(mcryptd_free_ahash); | 664 | EXPORT_SYMBOL_GPL(mcryptd_free_ahash); |
665 | 665 | ||
666 | 666 | ||
667 | static int __init mcryptd_init(void) | 667 | static int __init mcryptd_init(void) |
668 | { | 668 | { |
669 | int err, cpu; | 669 | int err, cpu; |
670 | struct mcryptd_flush_list *flist; | 670 | struct mcryptd_flush_list *flist; |
671 | 671 | ||
672 | mcryptd_flist = alloc_percpu(struct mcryptd_flush_list); | 672 | mcryptd_flist = alloc_percpu(struct mcryptd_flush_list); |
673 | for_each_possible_cpu(cpu) { | 673 | for_each_possible_cpu(cpu) { |
674 | flist = per_cpu_ptr(mcryptd_flist, cpu); | 674 | flist = per_cpu_ptr(mcryptd_flist, cpu); |
675 | INIT_LIST_HEAD(&flist->list); | 675 | INIT_LIST_HEAD(&flist->list); |
676 | mutex_init(&flist->lock); | 676 | mutex_init(&flist->lock); |
677 | } | 677 | } |
678 | 678 | ||
679 | err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN); | 679 | err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN); |
680 | if (err) { | 680 | if (err) { |
681 | free_percpu(mcryptd_flist); | 681 | free_percpu(mcryptd_flist); |
682 | return err; | 682 | return err; |
683 | } | 683 | } |
684 | 684 | ||
685 | err = crypto_register_template(&mcryptd_tmpl); | 685 | err = crypto_register_template(&mcryptd_tmpl); |
686 | if (err) { | 686 | if (err) { |
687 | mcryptd_fini_queue(&mqueue); | 687 | mcryptd_fini_queue(&mqueue); |
688 | free_percpu(mcryptd_flist); | 688 | free_percpu(mcryptd_flist); |
689 | } | 689 | } |
690 | 690 | ||
691 | return err; | 691 | return err; |
692 | } | 692 | } |
693 | 693 | ||
694 | static void __exit mcryptd_exit(void) | 694 | static void __exit mcryptd_exit(void) |
695 | { | 695 | { |
696 | mcryptd_fini_queue(&mqueue); | 696 | mcryptd_fini_queue(&mqueue); |
697 | crypto_unregister_template(&mcryptd_tmpl); | 697 | crypto_unregister_template(&mcryptd_tmpl); |
698 | free_percpu(mcryptd_flist); | 698 | free_percpu(mcryptd_flist); |
699 | } | 699 | } |
700 | 700 | ||
701 | subsys_initcall(mcryptd_init); | 701 | subsys_initcall(mcryptd_init); |
702 | module_exit(mcryptd_exit); | 702 | module_exit(mcryptd_exit); |
703 | 703 | ||
704 | MODULE_LICENSE("GPL"); | 704 | MODULE_LICENSE("GPL"); |
705 | MODULE_DESCRIPTION("Software async multibuffer crypto daemon"); | 705 | MODULE_DESCRIPTION("Software async multibuffer crypto daemon"); |
706 | MODULE_ALIAS_CRYPTO("mcryptd"); | ||
706 | 707 |
crypto/pcbc.c
1 | /* | 1 | /* |
2 | * PCBC: Propagating Cipher Block Chaining mode | 2 | * PCBC: Propagating Cipher Block Chaining mode |
3 | * | 3 | * |
4 | * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. | 4 | * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. |
5 | * Written by David Howells (dhowells@redhat.com) | 5 | * Written by David Howells (dhowells@redhat.com) |
6 | * | 6 | * |
7 | * Derived from cbc.c | 7 | * Derived from cbc.c |
8 | * - Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 8 | * - Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify it | 10 | * This program is free software; you can redistribute it and/or modify it |
11 | * under the terms of the GNU General Public License as published by the Free | 11 | * under the terms of the GNU General Public License as published by the Free |
12 | * Software Foundation; either version 2 of the License, or (at your option) | 12 | * Software Foundation; either version 2 of the License, or (at your option) |
13 | * any later version. | 13 | * any later version. |
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <crypto/algapi.h> | 17 | #include <crypto/algapi.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | 24 | ||
25 | struct crypto_pcbc_ctx { | 25 | struct crypto_pcbc_ctx { |
26 | struct crypto_cipher *child; | 26 | struct crypto_cipher *child; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key, | 29 | static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key, |
30 | unsigned int keylen) | 30 | unsigned int keylen) |
31 | { | 31 | { |
32 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent); | 32 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent); |
33 | struct crypto_cipher *child = ctx->child; | 33 | struct crypto_cipher *child = ctx->child; |
34 | int err; | 34 | int err; |
35 | 35 | ||
36 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 36 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
37 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 37 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & |
38 | CRYPTO_TFM_REQ_MASK); | 38 | CRYPTO_TFM_REQ_MASK); |
39 | err = crypto_cipher_setkey(child, key, keylen); | 39 | err = crypto_cipher_setkey(child, key, keylen); |
40 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 40 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & |
41 | CRYPTO_TFM_RES_MASK); | 41 | CRYPTO_TFM_RES_MASK); |
42 | return err; | 42 | return err; |
43 | } | 43 | } |
44 | 44 | ||
45 | static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, | 45 | static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, |
46 | struct blkcipher_walk *walk, | 46 | struct blkcipher_walk *walk, |
47 | struct crypto_cipher *tfm) | 47 | struct crypto_cipher *tfm) |
48 | { | 48 | { |
49 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 49 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
50 | crypto_cipher_alg(tfm)->cia_encrypt; | 50 | crypto_cipher_alg(tfm)->cia_encrypt; |
51 | int bsize = crypto_cipher_blocksize(tfm); | 51 | int bsize = crypto_cipher_blocksize(tfm); |
52 | unsigned int nbytes = walk->nbytes; | 52 | unsigned int nbytes = walk->nbytes; |
53 | u8 *src = walk->src.virt.addr; | 53 | u8 *src = walk->src.virt.addr; |
54 | u8 *dst = walk->dst.virt.addr; | 54 | u8 *dst = walk->dst.virt.addr; |
55 | u8 *iv = walk->iv; | 55 | u8 *iv = walk->iv; |
56 | 56 | ||
57 | do { | 57 | do { |
58 | crypto_xor(iv, src, bsize); | 58 | crypto_xor(iv, src, bsize); |
59 | fn(crypto_cipher_tfm(tfm), dst, iv); | 59 | fn(crypto_cipher_tfm(tfm), dst, iv); |
60 | memcpy(iv, dst, bsize); | 60 | memcpy(iv, dst, bsize); |
61 | crypto_xor(iv, src, bsize); | 61 | crypto_xor(iv, src, bsize); |
62 | 62 | ||
63 | src += bsize; | 63 | src += bsize; |
64 | dst += bsize; | 64 | dst += bsize; |
65 | } while ((nbytes -= bsize) >= bsize); | 65 | } while ((nbytes -= bsize) >= bsize); |
66 | 66 | ||
67 | return nbytes; | 67 | return nbytes; |
68 | } | 68 | } |
69 | 69 | ||
70 | static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, | 70 | static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, |
71 | struct blkcipher_walk *walk, | 71 | struct blkcipher_walk *walk, |
72 | struct crypto_cipher *tfm) | 72 | struct crypto_cipher *tfm) |
73 | { | 73 | { |
74 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 74 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
75 | crypto_cipher_alg(tfm)->cia_encrypt; | 75 | crypto_cipher_alg(tfm)->cia_encrypt; |
76 | int bsize = crypto_cipher_blocksize(tfm); | 76 | int bsize = crypto_cipher_blocksize(tfm); |
77 | unsigned int nbytes = walk->nbytes; | 77 | unsigned int nbytes = walk->nbytes; |
78 | u8 *src = walk->src.virt.addr; | 78 | u8 *src = walk->src.virt.addr; |
79 | u8 *iv = walk->iv; | 79 | u8 *iv = walk->iv; |
80 | u8 tmpbuf[bsize]; | 80 | u8 tmpbuf[bsize]; |
81 | 81 | ||
82 | do { | 82 | do { |
83 | memcpy(tmpbuf, src, bsize); | 83 | memcpy(tmpbuf, src, bsize); |
84 | crypto_xor(iv, src, bsize); | 84 | crypto_xor(iv, src, bsize); |
85 | fn(crypto_cipher_tfm(tfm), src, iv); | 85 | fn(crypto_cipher_tfm(tfm), src, iv); |
86 | memcpy(iv, tmpbuf, bsize); | 86 | memcpy(iv, tmpbuf, bsize); |
87 | crypto_xor(iv, src, bsize); | 87 | crypto_xor(iv, src, bsize); |
88 | 88 | ||
89 | src += bsize; | 89 | src += bsize; |
90 | } while ((nbytes -= bsize) >= bsize); | 90 | } while ((nbytes -= bsize) >= bsize); |
91 | 91 | ||
92 | memcpy(walk->iv, iv, bsize); | 92 | memcpy(walk->iv, iv, bsize); |
93 | 93 | ||
94 | return nbytes; | 94 | return nbytes; |
95 | } | 95 | } |
96 | 96 | ||
97 | static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, | 97 | static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, |
98 | struct scatterlist *dst, struct scatterlist *src, | 98 | struct scatterlist *dst, struct scatterlist *src, |
99 | unsigned int nbytes) | 99 | unsigned int nbytes) |
100 | { | 100 | { |
101 | struct blkcipher_walk walk; | 101 | struct blkcipher_walk walk; |
102 | struct crypto_blkcipher *tfm = desc->tfm; | 102 | struct crypto_blkcipher *tfm = desc->tfm; |
103 | struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | 103 | struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); |
104 | struct crypto_cipher *child = ctx->child; | 104 | struct crypto_cipher *child = ctx->child; |
105 | int err; | 105 | int err; |
106 | 106 | ||
107 | blkcipher_walk_init(&walk, dst, src, nbytes); | 107 | blkcipher_walk_init(&walk, dst, src, nbytes); |
108 | err = blkcipher_walk_virt(desc, &walk); | 108 | err = blkcipher_walk_virt(desc, &walk); |
109 | 109 | ||
110 | while ((nbytes = walk.nbytes)) { | 110 | while ((nbytes = walk.nbytes)) { |
111 | if (walk.src.virt.addr == walk.dst.virt.addr) | 111 | if (walk.src.virt.addr == walk.dst.virt.addr) |
112 | nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, | 112 | nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, |
113 | child); | 113 | child); |
114 | else | 114 | else |
115 | nbytes = crypto_pcbc_encrypt_segment(desc, &walk, | 115 | nbytes = crypto_pcbc_encrypt_segment(desc, &walk, |
116 | child); | 116 | child); |
117 | err = blkcipher_walk_done(desc, &walk, nbytes); | 117 | err = blkcipher_walk_done(desc, &walk, nbytes); |
118 | } | 118 | } |
119 | 119 | ||
120 | return err; | 120 | return err; |
121 | } | 121 | } |
122 | 122 | ||
123 | static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, | 123 | static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, |
124 | struct blkcipher_walk *walk, | 124 | struct blkcipher_walk *walk, |
125 | struct crypto_cipher *tfm) | 125 | struct crypto_cipher *tfm) |
126 | { | 126 | { |
127 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 127 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
128 | crypto_cipher_alg(tfm)->cia_decrypt; | 128 | crypto_cipher_alg(tfm)->cia_decrypt; |
129 | int bsize = crypto_cipher_blocksize(tfm); | 129 | int bsize = crypto_cipher_blocksize(tfm); |
130 | unsigned int nbytes = walk->nbytes; | 130 | unsigned int nbytes = walk->nbytes; |
131 | u8 *src = walk->src.virt.addr; | 131 | u8 *src = walk->src.virt.addr; |
132 | u8 *dst = walk->dst.virt.addr; | 132 | u8 *dst = walk->dst.virt.addr; |
133 | u8 *iv = walk->iv; | 133 | u8 *iv = walk->iv; |
134 | 134 | ||
135 | do { | 135 | do { |
136 | fn(crypto_cipher_tfm(tfm), dst, src); | 136 | fn(crypto_cipher_tfm(tfm), dst, src); |
137 | crypto_xor(dst, iv, bsize); | 137 | crypto_xor(dst, iv, bsize); |
138 | memcpy(iv, src, bsize); | 138 | memcpy(iv, src, bsize); |
139 | crypto_xor(iv, dst, bsize); | 139 | crypto_xor(iv, dst, bsize); |
140 | 140 | ||
141 | src += bsize; | 141 | src += bsize; |
142 | dst += bsize; | 142 | dst += bsize; |
143 | } while ((nbytes -= bsize) >= bsize); | 143 | } while ((nbytes -= bsize) >= bsize); |
144 | 144 | ||
145 | memcpy(walk->iv, iv, bsize); | 145 | memcpy(walk->iv, iv, bsize); |
146 | 146 | ||
147 | return nbytes; | 147 | return nbytes; |
148 | } | 148 | } |
149 | 149 | ||
150 | static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, | 150 | static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, |
151 | struct blkcipher_walk *walk, | 151 | struct blkcipher_walk *walk, |
152 | struct crypto_cipher *tfm) | 152 | struct crypto_cipher *tfm) |
153 | { | 153 | { |
154 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 154 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = |
155 | crypto_cipher_alg(tfm)->cia_decrypt; | 155 | crypto_cipher_alg(tfm)->cia_decrypt; |
156 | int bsize = crypto_cipher_blocksize(tfm); | 156 | int bsize = crypto_cipher_blocksize(tfm); |
157 | unsigned int nbytes = walk->nbytes; | 157 | unsigned int nbytes = walk->nbytes; |
158 | u8 *src = walk->src.virt.addr; | 158 | u8 *src = walk->src.virt.addr; |
159 | u8 *iv = walk->iv; | 159 | u8 *iv = walk->iv; |
160 | u8 tmpbuf[bsize]; | 160 | u8 tmpbuf[bsize]; |
161 | 161 | ||
162 | do { | 162 | do { |
163 | memcpy(tmpbuf, src, bsize); | 163 | memcpy(tmpbuf, src, bsize); |
164 | fn(crypto_cipher_tfm(tfm), src, src); | 164 | fn(crypto_cipher_tfm(tfm), src, src); |
165 | crypto_xor(src, iv, bsize); | 165 | crypto_xor(src, iv, bsize); |
166 | memcpy(iv, tmpbuf, bsize); | 166 | memcpy(iv, tmpbuf, bsize); |
167 | crypto_xor(iv, src, bsize); | 167 | crypto_xor(iv, src, bsize); |
168 | 168 | ||
169 | src += bsize; | 169 | src += bsize; |
170 | } while ((nbytes -= bsize) >= bsize); | 170 | } while ((nbytes -= bsize) >= bsize); |
171 | 171 | ||
172 | memcpy(walk->iv, iv, bsize); | 172 | memcpy(walk->iv, iv, bsize); |
173 | 173 | ||
174 | return nbytes; | 174 | return nbytes; |
175 | } | 175 | } |
176 | 176 | ||
177 | static int crypto_pcbc_decrypt(struct blkcipher_desc *desc, | 177 | static int crypto_pcbc_decrypt(struct blkcipher_desc *desc, |
178 | struct scatterlist *dst, struct scatterlist *src, | 178 | struct scatterlist *dst, struct scatterlist *src, |
179 | unsigned int nbytes) | 179 | unsigned int nbytes) |
180 | { | 180 | { |
181 | struct blkcipher_walk walk; | 181 | struct blkcipher_walk walk; |
182 | struct crypto_blkcipher *tfm = desc->tfm; | 182 | struct crypto_blkcipher *tfm = desc->tfm; |
183 | struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | 183 | struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); |
184 | struct crypto_cipher *child = ctx->child; | 184 | struct crypto_cipher *child = ctx->child; |
185 | int err; | 185 | int err; |
186 | 186 | ||
187 | blkcipher_walk_init(&walk, dst, src, nbytes); | 187 | blkcipher_walk_init(&walk, dst, src, nbytes); |
188 | err = blkcipher_walk_virt(desc, &walk); | 188 | err = blkcipher_walk_virt(desc, &walk); |
189 | 189 | ||
190 | while ((nbytes = walk.nbytes)) { | 190 | while ((nbytes = walk.nbytes)) { |
191 | if (walk.src.virt.addr == walk.dst.virt.addr) | 191 | if (walk.src.virt.addr == walk.dst.virt.addr) |
192 | nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, | 192 | nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, |
193 | child); | 193 | child); |
194 | else | 194 | else |
195 | nbytes = crypto_pcbc_decrypt_segment(desc, &walk, | 195 | nbytes = crypto_pcbc_decrypt_segment(desc, &walk, |
196 | child); | 196 | child); |
197 | err = blkcipher_walk_done(desc, &walk, nbytes); | 197 | err = blkcipher_walk_done(desc, &walk, nbytes); |
198 | } | 198 | } |
199 | 199 | ||
200 | return err; | 200 | return err; |
201 | } | 201 | } |
202 | 202 | ||
203 | static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) | 203 | static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) |
204 | { | 204 | { |
205 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 205 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
206 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 206 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
207 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); | 207 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); |
208 | struct crypto_cipher *cipher; | 208 | struct crypto_cipher *cipher; |
209 | 209 | ||
210 | cipher = crypto_spawn_cipher(spawn); | 210 | cipher = crypto_spawn_cipher(spawn); |
211 | if (IS_ERR(cipher)) | 211 | if (IS_ERR(cipher)) |
212 | return PTR_ERR(cipher); | 212 | return PTR_ERR(cipher); |
213 | 213 | ||
214 | ctx->child = cipher; | 214 | ctx->child = cipher; |
215 | return 0; | 215 | return 0; |
216 | } | 216 | } |
217 | 217 | ||
218 | static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) | 218 | static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) |
219 | { | 219 | { |
220 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); | 220 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); |
221 | crypto_free_cipher(ctx->child); | 221 | crypto_free_cipher(ctx->child); |
222 | } | 222 | } |
223 | 223 | ||
224 | static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) | 224 | static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) |
225 | { | 225 | { |
226 | struct crypto_instance *inst; | 226 | struct crypto_instance *inst; |
227 | struct crypto_alg *alg; | 227 | struct crypto_alg *alg; |
228 | int err; | 228 | int err; |
229 | 229 | ||
230 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 230 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); |
231 | if (err) | 231 | if (err) |
232 | return ERR_PTR(err); | 232 | return ERR_PTR(err); |
233 | 233 | ||
234 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 234 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
235 | CRYPTO_ALG_TYPE_MASK); | 235 | CRYPTO_ALG_TYPE_MASK); |
236 | if (IS_ERR(alg)) | 236 | if (IS_ERR(alg)) |
237 | return ERR_CAST(alg); | 237 | return ERR_CAST(alg); |
238 | 238 | ||
239 | inst = crypto_alloc_instance("pcbc", alg); | 239 | inst = crypto_alloc_instance("pcbc", alg); |
240 | if (IS_ERR(inst)) | 240 | if (IS_ERR(inst)) |
241 | goto out_put_alg; | 241 | goto out_put_alg; |
242 | 242 | ||
243 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 243 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; |
244 | inst->alg.cra_priority = alg->cra_priority; | 244 | inst->alg.cra_priority = alg->cra_priority; |
245 | inst->alg.cra_blocksize = alg->cra_blocksize; | 245 | inst->alg.cra_blocksize = alg->cra_blocksize; |
246 | inst->alg.cra_alignmask = alg->cra_alignmask; | 246 | inst->alg.cra_alignmask = alg->cra_alignmask; |
247 | inst->alg.cra_type = &crypto_blkcipher_type; | 247 | inst->alg.cra_type = &crypto_blkcipher_type; |
248 | 248 | ||
249 | /* We access the data as u32s when xoring. */ | 249 | /* We access the data as u32s when xoring. */ |
250 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; | 250 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; |
251 | 251 | ||
252 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 252 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; |
253 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; | 253 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; |
254 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; | 254 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; |
255 | 255 | ||
256 | inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); | 256 | inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); |
257 | 257 | ||
258 | inst->alg.cra_init = crypto_pcbc_init_tfm; | 258 | inst->alg.cra_init = crypto_pcbc_init_tfm; |
259 | inst->alg.cra_exit = crypto_pcbc_exit_tfm; | 259 | inst->alg.cra_exit = crypto_pcbc_exit_tfm; |
260 | 260 | ||
261 | inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey; | 261 | inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey; |
262 | inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt; | 262 | inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt; |
263 | inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt; | 263 | inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt; |
264 | 264 | ||
265 | out_put_alg: | 265 | out_put_alg: |
266 | crypto_mod_put(alg); | 266 | crypto_mod_put(alg); |
267 | return inst; | 267 | return inst; |
268 | } | 268 | } |
269 | 269 | ||
270 | static void crypto_pcbc_free(struct crypto_instance *inst) | 270 | static void crypto_pcbc_free(struct crypto_instance *inst) |
271 | { | 271 | { |
272 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 272 | crypto_drop_spawn(crypto_instance_ctx(inst)); |
273 | kfree(inst); | 273 | kfree(inst); |
274 | } | 274 | } |
275 | 275 | ||
276 | static struct crypto_template crypto_pcbc_tmpl = { | 276 | static struct crypto_template crypto_pcbc_tmpl = { |
277 | .name = "pcbc", | 277 | .name = "pcbc", |
278 | .alloc = crypto_pcbc_alloc, | 278 | .alloc = crypto_pcbc_alloc, |
279 | .free = crypto_pcbc_free, | 279 | .free = crypto_pcbc_free, |
280 | .module = THIS_MODULE, | 280 | .module = THIS_MODULE, |
281 | }; | 281 | }; |
282 | 282 | ||
283 | static int __init crypto_pcbc_module_init(void) | 283 | static int __init crypto_pcbc_module_init(void) |
284 | { | 284 | { |
285 | return crypto_register_template(&crypto_pcbc_tmpl); | 285 | return crypto_register_template(&crypto_pcbc_tmpl); |
286 | } | 286 | } |
287 | 287 | ||
288 | static void __exit crypto_pcbc_module_exit(void) | 288 | static void __exit crypto_pcbc_module_exit(void) |
289 | { | 289 | { |
290 | crypto_unregister_template(&crypto_pcbc_tmpl); | 290 | crypto_unregister_template(&crypto_pcbc_tmpl); |
291 | } | 291 | } |
292 | 292 | ||
293 | module_init(crypto_pcbc_module_init); | 293 | module_init(crypto_pcbc_module_init); |
294 | module_exit(crypto_pcbc_module_exit); | 294 | module_exit(crypto_pcbc_module_exit); |
295 | 295 | ||
296 | MODULE_LICENSE("GPL"); | 296 | MODULE_LICENSE("GPL"); |
297 | MODULE_DESCRIPTION("PCBC block cipher algorithm"); | 297 | MODULE_DESCRIPTION("PCBC block cipher algorithm"); |
298 | MODULE_ALIAS_CRYPTO("pcbc"); | ||
298 | 299 |
crypto/pcrypt.c
1 | /* | 1 | /* |
2 | * pcrypt - Parallel crypto wrapper. | 2 | * pcrypt - Parallel crypto wrapper. |
3 | * | 3 | * |
4 | * Copyright (C) 2009 secunet Security Networks AG | 4 | * Copyright (C) 2009 secunet Security Networks AG |
5 | * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> | 5 | * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms and conditions of the GNU General Public License, | 8 | * under the terms and conditions of the GNU General Public License, |
9 | * version 2, as published by the Free Software Foundation. | 9 | * version 2, as published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope it will be useful, but WITHOUT | 11 | * This program is distributed in the hope it will be useful, but WITHOUT |
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
14 | * more details. | 14 | * more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License along with | 16 | * You should have received a copy of the GNU General Public License along with |
17 | * this program; if not, write to the Free Software Foundation, Inc., | 17 | * this program; if not, write to the Free Software Foundation, Inc., |
18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | 18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <crypto/algapi.h> | 21 | #include <crypto/algapi.h> |
22 | #include <crypto/internal/aead.h> | 22 | #include <crypto/internal/aead.h> |
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/notifier.h> | 27 | #include <linux/notifier.h> |
28 | #include <linux/kobject.h> | 28 | #include <linux/kobject.h> |
29 | #include <linux/cpu.h> | 29 | #include <linux/cpu.h> |
30 | #include <crypto/pcrypt.h> | 30 | #include <crypto/pcrypt.h> |
31 | 31 | ||
struct padata_pcrypt {
	struct padata_instance *pinst;	/* padata instance driving this direction */
	struct workqueue_struct *wq;	/* workqueue backing the padata instance */

	/*
	 * Cpumask for callback CPUs. It should be
	 * equal to serial cpumask of corresponding padata instance,
	 * so it is updated when padata notifies us about serial
	 * cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This fact prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on kernel configuration (particularly on
	 * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
	 * cpumask_var_t may be either a pointer to the struct cpumask
	 * or a variable allocated on the stack. Thus we can not safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped with struct
	 * pcrypt_cpumask which makes possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;	/* registered for padata cpumask changes */
};
57 | 57 | ||
/* One padata instance per direction: encryption and decryption. */
static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
/* Parent kset under /sys/kernel for the pencrypt/pdecrypt kobjects. */
static struct kset           *pcrypt_kset;
61 | 61 | ||
/* Per-instance context: the wrapped algorithm's spawn plus a counter
 * used to spread transforms across online CPUs (see pcrypt_aead_init_tfm). */
struct pcrypt_instance_ctx {
	struct crypto_spawn spawn;
	unsigned int tfm_count;
};
66 | 66 | ||
/* Per-transform context: the child AEAD and the CPU chosen for the
 * serialization callback of this transform's requests. */
struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};
71 | 71 | ||
72 | static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, | 72 | static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, |
73 | struct padata_pcrypt *pcrypt) | 73 | struct padata_pcrypt *pcrypt) |
74 | { | 74 | { |
75 | unsigned int cpu_index, cpu, i; | 75 | unsigned int cpu_index, cpu, i; |
76 | struct pcrypt_cpumask *cpumask; | 76 | struct pcrypt_cpumask *cpumask; |
77 | 77 | ||
78 | cpu = *cb_cpu; | 78 | cpu = *cb_cpu; |
79 | 79 | ||
80 | rcu_read_lock_bh(); | 80 | rcu_read_lock_bh(); |
81 | cpumask = rcu_dereference_bh(pcrypt->cb_cpumask); | 81 | cpumask = rcu_dereference_bh(pcrypt->cb_cpumask); |
82 | if (cpumask_test_cpu(cpu, cpumask->mask)) | 82 | if (cpumask_test_cpu(cpu, cpumask->mask)) |
83 | goto out; | 83 | goto out; |
84 | 84 | ||
85 | if (!cpumask_weight(cpumask->mask)) | 85 | if (!cpumask_weight(cpumask->mask)) |
86 | goto out; | 86 | goto out; |
87 | 87 | ||
88 | cpu_index = cpu % cpumask_weight(cpumask->mask); | 88 | cpu_index = cpu % cpumask_weight(cpumask->mask); |
89 | 89 | ||
90 | cpu = cpumask_first(cpumask->mask); | 90 | cpu = cpumask_first(cpumask->mask); |
91 | for (i = 0; i < cpu_index; i++) | 91 | for (i = 0; i < cpu_index; i++) |
92 | cpu = cpumask_next(cpu, cpumask->mask); | 92 | cpu = cpumask_next(cpu, cpumask->mask); |
93 | 93 | ||
94 | *cb_cpu = cpu; | 94 | *cb_cpu = cpu; |
95 | 95 | ||
96 | out: | 96 | out: |
97 | rcu_read_unlock_bh(); | 97 | rcu_read_unlock_bh(); |
98 | return padata_do_parallel(pcrypt->pinst, padata, cpu); | 98 | return padata_do_parallel(pcrypt->pinst, padata, cpu); |
99 | } | 99 | } |
100 | 100 | ||
101 | static int pcrypt_aead_setkey(struct crypto_aead *parent, | 101 | static int pcrypt_aead_setkey(struct crypto_aead *parent, |
102 | const u8 *key, unsigned int keylen) | 102 | const u8 *key, unsigned int keylen) |
103 | { | 103 | { |
104 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); | 104 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); |
105 | 105 | ||
106 | return crypto_aead_setkey(ctx->child, key, keylen); | 106 | return crypto_aead_setkey(ctx->child, key, keylen); |
107 | } | 107 | } |
108 | 108 | ||
109 | static int pcrypt_aead_setauthsize(struct crypto_aead *parent, | 109 | static int pcrypt_aead_setauthsize(struct crypto_aead *parent, |
110 | unsigned int authsize) | 110 | unsigned int authsize) |
111 | { | 111 | { |
112 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); | 112 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); |
113 | 113 | ||
114 | return crypto_aead_setauthsize(ctx->child, authsize); | 114 | return crypto_aead_setauthsize(ctx->child, authsize); |
115 | } | 115 | } |
116 | 116 | ||
117 | static void pcrypt_aead_serial(struct padata_priv *padata) | 117 | static void pcrypt_aead_serial(struct padata_priv *padata) |
118 | { | 118 | { |
119 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | 119 | struct pcrypt_request *preq = pcrypt_padata_request(padata); |
120 | struct aead_request *req = pcrypt_request_ctx(preq); | 120 | struct aead_request *req = pcrypt_request_ctx(preq); |
121 | 121 | ||
122 | aead_request_complete(req->base.data, padata->info); | 122 | aead_request_complete(req->base.data, padata->info); |
123 | } | 123 | } |
124 | 124 | ||
125 | static void pcrypt_aead_giv_serial(struct padata_priv *padata) | 125 | static void pcrypt_aead_giv_serial(struct padata_priv *padata) |
126 | { | 126 | { |
127 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | 127 | struct pcrypt_request *preq = pcrypt_padata_request(padata); |
128 | struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); | 128 | struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); |
129 | 129 | ||
130 | aead_request_complete(req->areq.base.data, padata->info); | 130 | aead_request_complete(req->areq.base.data, padata->info); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void pcrypt_aead_done(struct crypto_async_request *areq, int err) | 133 | static void pcrypt_aead_done(struct crypto_async_request *areq, int err) |
134 | { | 134 | { |
135 | struct aead_request *req = areq->data; | 135 | struct aead_request *req = areq->data; |
136 | struct pcrypt_request *preq = aead_request_ctx(req); | 136 | struct pcrypt_request *preq = aead_request_ctx(req); |
137 | struct padata_priv *padata = pcrypt_request_padata(preq); | 137 | struct padata_priv *padata = pcrypt_request_padata(preq); |
138 | 138 | ||
139 | padata->info = err; | 139 | padata->info = err; |
140 | req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | 140 | req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
141 | 141 | ||
142 | padata_do_serial(padata); | 142 | padata_do_serial(padata); |
143 | } | 143 | } |
144 | 144 | ||
145 | static void pcrypt_aead_enc(struct padata_priv *padata) | 145 | static void pcrypt_aead_enc(struct padata_priv *padata) |
146 | { | 146 | { |
147 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | 147 | struct pcrypt_request *preq = pcrypt_padata_request(padata); |
148 | struct aead_request *req = pcrypt_request_ctx(preq); | 148 | struct aead_request *req = pcrypt_request_ctx(preq); |
149 | 149 | ||
150 | padata->info = crypto_aead_encrypt(req); | 150 | padata->info = crypto_aead_encrypt(req); |
151 | 151 | ||
152 | if (padata->info == -EINPROGRESS) | 152 | if (padata->info == -EINPROGRESS) |
153 | return; | 153 | return; |
154 | 154 | ||
155 | padata_do_serial(padata); | 155 | padata_do_serial(padata); |
156 | } | 156 | } |
157 | 157 | ||
158 | static int pcrypt_aead_encrypt(struct aead_request *req) | 158 | static int pcrypt_aead_encrypt(struct aead_request *req) |
159 | { | 159 | { |
160 | int err; | 160 | int err; |
161 | struct pcrypt_request *preq = aead_request_ctx(req); | 161 | struct pcrypt_request *preq = aead_request_ctx(req); |
162 | struct aead_request *creq = pcrypt_request_ctx(preq); | 162 | struct aead_request *creq = pcrypt_request_ctx(preq); |
163 | struct padata_priv *padata = pcrypt_request_padata(preq); | 163 | struct padata_priv *padata = pcrypt_request_padata(preq); |
164 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 164 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
165 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); | 165 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); |
166 | u32 flags = aead_request_flags(req); | 166 | u32 flags = aead_request_flags(req); |
167 | 167 | ||
168 | memset(padata, 0, sizeof(struct padata_priv)); | 168 | memset(padata, 0, sizeof(struct padata_priv)); |
169 | 169 | ||
170 | padata->parallel = pcrypt_aead_enc; | 170 | padata->parallel = pcrypt_aead_enc; |
171 | padata->serial = pcrypt_aead_serial; | 171 | padata->serial = pcrypt_aead_serial; |
172 | 172 | ||
173 | aead_request_set_tfm(creq, ctx->child); | 173 | aead_request_set_tfm(creq, ctx->child); |
174 | aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | 174 | aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, |
175 | pcrypt_aead_done, req); | 175 | pcrypt_aead_done, req); |
176 | aead_request_set_crypt(creq, req->src, req->dst, | 176 | aead_request_set_crypt(creq, req->src, req->dst, |
177 | req->cryptlen, req->iv); | 177 | req->cryptlen, req->iv); |
178 | aead_request_set_assoc(creq, req->assoc, req->assoclen); | 178 | aead_request_set_assoc(creq, req->assoc, req->assoclen); |
179 | 179 | ||
180 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); | 180 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); |
181 | if (!err) | 181 | if (!err) |
182 | return -EINPROGRESS; | 182 | return -EINPROGRESS; |
183 | 183 | ||
184 | return err; | 184 | return err; |
185 | } | 185 | } |
186 | 186 | ||
187 | static void pcrypt_aead_dec(struct padata_priv *padata) | 187 | static void pcrypt_aead_dec(struct padata_priv *padata) |
188 | { | 188 | { |
189 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | 189 | struct pcrypt_request *preq = pcrypt_padata_request(padata); |
190 | struct aead_request *req = pcrypt_request_ctx(preq); | 190 | struct aead_request *req = pcrypt_request_ctx(preq); |
191 | 191 | ||
192 | padata->info = crypto_aead_decrypt(req); | 192 | padata->info = crypto_aead_decrypt(req); |
193 | 193 | ||
194 | if (padata->info == -EINPROGRESS) | 194 | if (padata->info == -EINPROGRESS) |
195 | return; | 195 | return; |
196 | 196 | ||
197 | padata_do_serial(padata); | 197 | padata_do_serial(padata); |
198 | } | 198 | } |
199 | 199 | ||
200 | static int pcrypt_aead_decrypt(struct aead_request *req) | 200 | static int pcrypt_aead_decrypt(struct aead_request *req) |
201 | { | 201 | { |
202 | int err; | 202 | int err; |
203 | struct pcrypt_request *preq = aead_request_ctx(req); | 203 | struct pcrypt_request *preq = aead_request_ctx(req); |
204 | struct aead_request *creq = pcrypt_request_ctx(preq); | 204 | struct aead_request *creq = pcrypt_request_ctx(preq); |
205 | struct padata_priv *padata = pcrypt_request_padata(preq); | 205 | struct padata_priv *padata = pcrypt_request_padata(preq); |
206 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 206 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
207 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); | 207 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); |
208 | u32 flags = aead_request_flags(req); | 208 | u32 flags = aead_request_flags(req); |
209 | 209 | ||
210 | memset(padata, 0, sizeof(struct padata_priv)); | 210 | memset(padata, 0, sizeof(struct padata_priv)); |
211 | 211 | ||
212 | padata->parallel = pcrypt_aead_dec; | 212 | padata->parallel = pcrypt_aead_dec; |
213 | padata->serial = pcrypt_aead_serial; | 213 | padata->serial = pcrypt_aead_serial; |
214 | 214 | ||
215 | aead_request_set_tfm(creq, ctx->child); | 215 | aead_request_set_tfm(creq, ctx->child); |
216 | aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | 216 | aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, |
217 | pcrypt_aead_done, req); | 217 | pcrypt_aead_done, req); |
218 | aead_request_set_crypt(creq, req->src, req->dst, | 218 | aead_request_set_crypt(creq, req->src, req->dst, |
219 | req->cryptlen, req->iv); | 219 | req->cryptlen, req->iv); |
220 | aead_request_set_assoc(creq, req->assoc, req->assoclen); | 220 | aead_request_set_assoc(creq, req->assoc, req->assoclen); |
221 | 221 | ||
222 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); | 222 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); |
223 | if (!err) | 223 | if (!err) |
224 | return -EINPROGRESS; | 224 | return -EINPROGRESS; |
225 | 225 | ||
226 | return err; | 226 | return err; |
227 | } | 227 | } |
228 | 228 | ||
229 | static void pcrypt_aead_givenc(struct padata_priv *padata) | 229 | static void pcrypt_aead_givenc(struct padata_priv *padata) |
230 | { | 230 | { |
231 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | 231 | struct pcrypt_request *preq = pcrypt_padata_request(padata); |
232 | struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); | 232 | struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); |
233 | 233 | ||
234 | padata->info = crypto_aead_givencrypt(req); | 234 | padata->info = crypto_aead_givencrypt(req); |
235 | 235 | ||
236 | if (padata->info == -EINPROGRESS) | 236 | if (padata->info == -EINPROGRESS) |
237 | return; | 237 | return; |
238 | 238 | ||
239 | padata_do_serial(padata); | 239 | padata_do_serial(padata); |
240 | } | 240 | } |
241 | 241 | ||
242 | static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) | 242 | static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) |
243 | { | 243 | { |
244 | int err; | 244 | int err; |
245 | struct aead_request *areq = &req->areq; | 245 | struct aead_request *areq = &req->areq; |
246 | struct pcrypt_request *preq = aead_request_ctx(areq); | 246 | struct pcrypt_request *preq = aead_request_ctx(areq); |
247 | struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq); | 247 | struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq); |
248 | struct padata_priv *padata = pcrypt_request_padata(preq); | 248 | struct padata_priv *padata = pcrypt_request_padata(preq); |
249 | struct crypto_aead *aead = aead_givcrypt_reqtfm(req); | 249 | struct crypto_aead *aead = aead_givcrypt_reqtfm(req); |
250 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); | 250 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); |
251 | u32 flags = aead_request_flags(areq); | 251 | u32 flags = aead_request_flags(areq); |
252 | 252 | ||
253 | memset(padata, 0, sizeof(struct padata_priv)); | 253 | memset(padata, 0, sizeof(struct padata_priv)); |
254 | 254 | ||
255 | padata->parallel = pcrypt_aead_givenc; | 255 | padata->parallel = pcrypt_aead_givenc; |
256 | padata->serial = pcrypt_aead_giv_serial; | 256 | padata->serial = pcrypt_aead_giv_serial; |
257 | 257 | ||
258 | aead_givcrypt_set_tfm(creq, ctx->child); | 258 | aead_givcrypt_set_tfm(creq, ctx->child); |
259 | aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | 259 | aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, |
260 | pcrypt_aead_done, areq); | 260 | pcrypt_aead_done, areq); |
261 | aead_givcrypt_set_crypt(creq, areq->src, areq->dst, | 261 | aead_givcrypt_set_crypt(creq, areq->src, areq->dst, |
262 | areq->cryptlen, areq->iv); | 262 | areq->cryptlen, areq->iv); |
263 | aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); | 263 | aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); |
264 | aead_givcrypt_set_giv(creq, req->giv, req->seq); | 264 | aead_givcrypt_set_giv(creq, req->giv, req->seq); |
265 | 265 | ||
266 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); | 266 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); |
267 | if (!err) | 267 | if (!err) |
268 | return -EINPROGRESS; | 268 | return -EINPROGRESS; |
269 | 269 | ||
270 | return err; | 270 | return err; |
271 | } | 271 | } |
272 | 272 | ||
273 | static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm) | 273 | static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm) |
274 | { | 274 | { |
275 | int cpu, cpu_index; | 275 | int cpu, cpu_index; |
276 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 276 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
277 | struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst); | 277 | struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst); |
278 | struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 278 | struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); |
279 | struct crypto_aead *cipher; | 279 | struct crypto_aead *cipher; |
280 | 280 | ||
281 | ictx->tfm_count++; | 281 | ictx->tfm_count++; |
282 | 282 | ||
283 | cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask); | 283 | cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask); |
284 | 284 | ||
285 | ctx->cb_cpu = cpumask_first(cpu_online_mask); | 285 | ctx->cb_cpu = cpumask_first(cpu_online_mask); |
286 | for (cpu = 0; cpu < cpu_index; cpu++) | 286 | for (cpu = 0; cpu < cpu_index; cpu++) |
287 | ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask); | 287 | ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask); |
288 | 288 | ||
289 | cipher = crypto_spawn_aead(crypto_instance_ctx(inst)); | 289 | cipher = crypto_spawn_aead(crypto_instance_ctx(inst)); |
290 | 290 | ||
291 | if (IS_ERR(cipher)) | 291 | if (IS_ERR(cipher)) |
292 | return PTR_ERR(cipher); | 292 | return PTR_ERR(cipher); |
293 | 293 | ||
294 | ctx->child = cipher; | 294 | ctx->child = cipher; |
295 | tfm->crt_aead.reqsize = sizeof(struct pcrypt_request) | 295 | tfm->crt_aead.reqsize = sizeof(struct pcrypt_request) |
296 | + sizeof(struct aead_givcrypt_request) | 296 | + sizeof(struct aead_givcrypt_request) |
297 | + crypto_aead_reqsize(cipher); | 297 | + crypto_aead_reqsize(cipher); |
298 | 298 | ||
299 | return 0; | 299 | return 0; |
300 | } | 300 | } |
301 | 301 | ||
302 | static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm) | 302 | static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm) |
303 | { | 303 | { |
304 | struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 304 | struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); |
305 | 305 | ||
306 | crypto_free_aead(ctx->child); | 306 | crypto_free_aead(ctx->child); |
307 | } | 307 | } |
308 | 308 | ||
309 | static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg) | 309 | static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg) |
310 | { | 310 | { |
311 | struct crypto_instance *inst; | 311 | struct crypto_instance *inst; |
312 | struct pcrypt_instance_ctx *ctx; | 312 | struct pcrypt_instance_ctx *ctx; |
313 | int err; | 313 | int err; |
314 | 314 | ||
315 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 315 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
316 | if (!inst) { | 316 | if (!inst) { |
317 | inst = ERR_PTR(-ENOMEM); | 317 | inst = ERR_PTR(-ENOMEM); |
318 | goto out; | 318 | goto out; |
319 | } | 319 | } |
320 | 320 | ||
321 | err = -ENAMETOOLONG; | 321 | err = -ENAMETOOLONG; |
322 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 322 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
323 | "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 323 | "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
324 | goto out_free_inst; | 324 | goto out_free_inst; |
325 | 325 | ||
326 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 326 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
327 | 327 | ||
328 | ctx = crypto_instance_ctx(inst); | 328 | ctx = crypto_instance_ctx(inst); |
329 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | 329 | err = crypto_init_spawn(&ctx->spawn, alg, inst, |
330 | CRYPTO_ALG_TYPE_MASK); | 330 | CRYPTO_ALG_TYPE_MASK); |
331 | if (err) | 331 | if (err) |
332 | goto out_free_inst; | 332 | goto out_free_inst; |
333 | 333 | ||
334 | inst->alg.cra_priority = alg->cra_priority + 100; | 334 | inst->alg.cra_priority = alg->cra_priority + 100; |
335 | inst->alg.cra_blocksize = alg->cra_blocksize; | 335 | inst->alg.cra_blocksize = alg->cra_blocksize; |
336 | inst->alg.cra_alignmask = alg->cra_alignmask; | 336 | inst->alg.cra_alignmask = alg->cra_alignmask; |
337 | 337 | ||
338 | out: | 338 | out: |
339 | return inst; | 339 | return inst; |
340 | 340 | ||
341 | out_free_inst: | 341 | out_free_inst: |
342 | kfree(inst); | 342 | kfree(inst); |
343 | inst = ERR_PTR(err); | 343 | inst = ERR_PTR(err); |
344 | goto out; | 344 | goto out; |
345 | } | 345 | } |
346 | 346 | ||
347 | static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, | 347 | static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, |
348 | u32 type, u32 mask) | 348 | u32 type, u32 mask) |
349 | { | 349 | { |
350 | struct crypto_instance *inst; | 350 | struct crypto_instance *inst; |
351 | struct crypto_alg *alg; | 351 | struct crypto_alg *alg; |
352 | 352 | ||
353 | alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK)); | 353 | alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK)); |
354 | if (IS_ERR(alg)) | 354 | if (IS_ERR(alg)) |
355 | return ERR_CAST(alg); | 355 | return ERR_CAST(alg); |
356 | 356 | ||
357 | inst = pcrypt_alloc_instance(alg); | 357 | inst = pcrypt_alloc_instance(alg); |
358 | if (IS_ERR(inst)) | 358 | if (IS_ERR(inst)) |
359 | goto out_put_alg; | 359 | goto out_put_alg; |
360 | 360 | ||
361 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | 361 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; |
362 | inst->alg.cra_type = &crypto_aead_type; | 362 | inst->alg.cra_type = &crypto_aead_type; |
363 | 363 | ||
364 | inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; | 364 | inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; |
365 | inst->alg.cra_aead.geniv = alg->cra_aead.geniv; | 365 | inst->alg.cra_aead.geniv = alg->cra_aead.geniv; |
366 | inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; | 366 | inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; |
367 | 367 | ||
368 | inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); | 368 | inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); |
369 | 369 | ||
370 | inst->alg.cra_init = pcrypt_aead_init_tfm; | 370 | inst->alg.cra_init = pcrypt_aead_init_tfm; |
371 | inst->alg.cra_exit = pcrypt_aead_exit_tfm; | 371 | inst->alg.cra_exit = pcrypt_aead_exit_tfm; |
372 | 372 | ||
373 | inst->alg.cra_aead.setkey = pcrypt_aead_setkey; | 373 | inst->alg.cra_aead.setkey = pcrypt_aead_setkey; |
374 | inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize; | 374 | inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize; |
375 | inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt; | 375 | inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt; |
376 | inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt; | 376 | inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt; |
377 | inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt; | 377 | inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt; |
378 | 378 | ||
379 | out_put_alg: | 379 | out_put_alg: |
380 | crypto_mod_put(alg); | 380 | crypto_mod_put(alg); |
381 | return inst; | 381 | return inst; |
382 | } | 382 | } |
383 | 383 | ||
384 | static struct crypto_instance *pcrypt_alloc(struct rtattr **tb) | 384 | static struct crypto_instance *pcrypt_alloc(struct rtattr **tb) |
385 | { | 385 | { |
386 | struct crypto_attr_type *algt; | 386 | struct crypto_attr_type *algt; |
387 | 387 | ||
388 | algt = crypto_get_attr_type(tb); | 388 | algt = crypto_get_attr_type(tb); |
389 | if (IS_ERR(algt)) | 389 | if (IS_ERR(algt)) |
390 | return ERR_CAST(algt); | 390 | return ERR_CAST(algt); |
391 | 391 | ||
392 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 392 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
393 | case CRYPTO_ALG_TYPE_AEAD: | 393 | case CRYPTO_ALG_TYPE_AEAD: |
394 | return pcrypt_alloc_aead(tb, algt->type, algt->mask); | 394 | return pcrypt_alloc_aead(tb, algt->type, algt->mask); |
395 | } | 395 | } |
396 | 396 | ||
397 | return ERR_PTR(-EINVAL); | 397 | return ERR_PTR(-EINVAL); |
398 | } | 398 | } |
399 | 399 | ||
400 | static void pcrypt_free(struct crypto_instance *inst) | 400 | static void pcrypt_free(struct crypto_instance *inst) |
401 | { | 401 | { |
402 | struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); | 402 | struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); |
403 | 403 | ||
404 | crypto_drop_spawn(&ctx->spawn); | 404 | crypto_drop_spawn(&ctx->spawn); |
405 | kfree(inst); | 405 | kfree(inst); |
406 | } | 406 | } |
407 | 407 | ||
/*
 * padata cpumask notifier: mirror the instance's new serial cpumask into
 * our RCU-protected cb_cpumask so pcrypt_do_parallel() only picks
 * callback CPUs from the active serial set.
 *
 * Returns 0 on success (or when the event is not a serial-cpumask
 * change), -ENOMEM if the replacement mask cannot be allocated.
 */
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	/* Only the serial cpumask matters for callback CPU selection. */
	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	/* Publish the fully initialised replacement before freeing. */
	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	/* Wait for all rcu_read_lock_bh() readers of the old mask
	 * (pcrypt_do_parallel) to finish before it is freed. */
	synchronize_rcu_bh();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}
437 | 437 | ||
438 | static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) | 438 | static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) |
439 | { | 439 | { |
440 | int ret; | 440 | int ret; |
441 | 441 | ||
442 | pinst->kobj.kset = pcrypt_kset; | 442 | pinst->kobj.kset = pcrypt_kset; |
443 | ret = kobject_add(&pinst->kobj, NULL, name); | 443 | ret = kobject_add(&pinst->kobj, NULL, name); |
444 | if (!ret) | 444 | if (!ret) |
445 | kobject_uevent(&pinst->kobj, KOBJ_ADD); | 445 | kobject_uevent(&pinst->kobj, KOBJ_ADD); |
446 | 446 | ||
447 | return ret; | 447 | return ret; |
448 | } | 448 | } |
449 | 449 | ||
/*
 * Bring up one pcrypt direction: workqueue, padata instance, initial
 * callback cpumask, cpumask-change notifier and sysfs node.  On failure,
 * everything acquired so far is released in reverse order via the goto
 * cleanup chain.  Returns 0 on success, negative errno otherwise.
 */
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	/* Keep the set of online CPUs stable during setup. */
	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	/* Start with every currently-online CPU as a callback candidate;
	 * later serial-cpumask changes arrive via the notifier below. */
	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}
505 | 505 | ||
/*
 * Tear down one pcrypt direction, reversing pcrypt_init_padata().
 * NOTE(review): the cb_cpumask is freed without RCU synchronization —
 * presumably no pcrypt_do_parallel() callers remain at this point;
 * confirm against the module-exit path.
 */
static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	/* Stop processing before the notifier and workqueue go away. */
	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}
516 | 516 | ||
517 | static struct crypto_template pcrypt_tmpl = { | 517 | static struct crypto_template pcrypt_tmpl = { |
518 | .name = "pcrypt", | 518 | .name = "pcrypt", |
519 | .alloc = pcrypt_alloc, | 519 | .alloc = pcrypt_alloc, |
520 | .free = pcrypt_free, | 520 | .free = pcrypt_free, |
521 | .module = THIS_MODULE, | 521 | .module = THIS_MODULE, |
522 | }; | 522 | }; |
523 | 523 | ||
524 | static int __init pcrypt_init(void) | 524 | static int __init pcrypt_init(void) |
525 | { | 525 | { |
526 | int err = -ENOMEM; | 526 | int err = -ENOMEM; |
527 | 527 | ||
528 | pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj); | 528 | pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj); |
529 | if (!pcrypt_kset) | 529 | if (!pcrypt_kset) |
530 | goto err; | 530 | goto err; |
531 | 531 | ||
532 | err = pcrypt_init_padata(&pencrypt, "pencrypt"); | 532 | err = pcrypt_init_padata(&pencrypt, "pencrypt"); |
533 | if (err) | 533 | if (err) |
534 | goto err_unreg_kset; | 534 | goto err_unreg_kset; |
535 | 535 | ||
536 | err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); | 536 | err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); |
537 | if (err) | 537 | if (err) |
538 | goto err_deinit_pencrypt; | 538 | goto err_deinit_pencrypt; |
539 | 539 | ||
540 | padata_start(pencrypt.pinst); | 540 | padata_start(pencrypt.pinst); |
541 | padata_start(pdecrypt.pinst); | 541 | padata_start(pdecrypt.pinst); |
542 | 542 | ||
543 | return crypto_register_template(&pcrypt_tmpl); | 543 | return crypto_register_template(&pcrypt_tmpl); |
544 | 544 | ||
545 | err_deinit_pencrypt: | 545 | err_deinit_pencrypt: |
546 | pcrypt_fini_padata(&pencrypt); | 546 | pcrypt_fini_padata(&pencrypt); |
547 | err_unreg_kset: | 547 | err_unreg_kset: |
548 | kset_unregister(pcrypt_kset); | 548 | kset_unregister(pcrypt_kset); |
549 | err: | 549 | err: |
550 | return err; | 550 | return err; |
551 | } | 551 | } |
552 | 552 | ||
553 | static void __exit pcrypt_exit(void) | 553 | static void __exit pcrypt_exit(void) |
554 | { | 554 | { |
555 | pcrypt_fini_padata(&pencrypt); | 555 | pcrypt_fini_padata(&pencrypt); |
556 | pcrypt_fini_padata(&pdecrypt); | 556 | pcrypt_fini_padata(&pdecrypt); |
557 | 557 | ||
558 | kset_unregister(pcrypt_kset); | 558 | kset_unregister(pcrypt_kset); |
559 | crypto_unregister_template(&pcrypt_tmpl); | 559 | crypto_unregister_template(&pcrypt_tmpl); |
560 | } | 560 | } |
561 | 561 | ||
562 | module_init(pcrypt_init); | 562 | module_init(pcrypt_init); |
563 | module_exit(pcrypt_exit); | 563 | module_exit(pcrypt_exit); |
564 | 564 | ||
565 | MODULE_LICENSE("GPL"); | 565 | MODULE_LICENSE("GPL"); |
566 | MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); | 566 | MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); |
567 | MODULE_DESCRIPTION("Parallel crypto wrapper"); | 567 | MODULE_DESCRIPTION("Parallel crypto wrapper"); |
568 | MODULE_ALIAS_CRYPTO("pcrypt"); | ||
568 | 569 |
crypto/seqiv.c
1 | /* | 1 | /* |
2 | * seqiv: Sequence Number IV Generator | 2 | * seqiv: Sequence Number IV Generator |
3 | * | 3 | * |
4 | * This generator generates an IV based on a sequence number by xoring it | 4 | * This generator generates an IV based on a sequence number by xoring it |
5 | * with a salt. This algorithm is mainly useful for CTR and similar modes. | 5 | * with a salt. This algorithm is mainly useful for CTR and similar modes. |
6 | * | 6 | * |
7 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free |
11 | * Software Foundation; either version 2 of the License, or (at your option) | 11 | * Software Foundation; either version 2 of the License, or (at your option) |
12 | * any later version. | 12 | * any later version. |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <crypto/internal/aead.h> | 16 | #include <crypto/internal/aead.h> |
17 | #include <crypto/internal/skcipher.h> | 17 | #include <crypto/internal/skcipher.h> |
18 | #include <crypto/rng.h> | 18 | #include <crypto/rng.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/string.h> | 25 | #include <linux/string.h> |
26 | 26 | ||
27 | struct seqiv_ctx { | 27 | struct seqiv_ctx { |
28 | spinlock_t lock; | 28 | spinlock_t lock; |
29 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | 29 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); |
30 | }; | 30 | }; |
31 | 31 | ||
32 | static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) | 32 | static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) |
33 | { | 33 | { |
34 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); | 34 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); |
35 | struct crypto_ablkcipher *geniv; | 35 | struct crypto_ablkcipher *geniv; |
36 | 36 | ||
37 | if (err == -EINPROGRESS) | 37 | if (err == -EINPROGRESS) |
38 | return; | 38 | return; |
39 | 39 | ||
40 | if (err) | 40 | if (err) |
41 | goto out; | 41 | goto out; |
42 | 42 | ||
43 | geniv = skcipher_givcrypt_reqtfm(req); | 43 | geniv = skcipher_givcrypt_reqtfm(req); |
44 | memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv)); | 44 | memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv)); |
45 | 45 | ||
46 | out: | 46 | out: |
47 | kfree(subreq->info); | 47 | kfree(subreq->info); |
48 | } | 48 | } |
49 | 49 | ||
50 | static void seqiv_complete(struct crypto_async_request *base, int err) | 50 | static void seqiv_complete(struct crypto_async_request *base, int err) |
51 | { | 51 | { |
52 | struct skcipher_givcrypt_request *req = base->data; | 52 | struct skcipher_givcrypt_request *req = base->data; |
53 | 53 | ||
54 | seqiv_complete2(req, err); | 54 | seqiv_complete2(req, err); |
55 | skcipher_givcrypt_complete(req, err); | 55 | skcipher_givcrypt_complete(req, err); |
56 | } | 56 | } |
57 | 57 | ||
58 | static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err) | 58 | static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err) |
59 | { | 59 | { |
60 | struct aead_request *subreq = aead_givcrypt_reqctx(req); | 60 | struct aead_request *subreq = aead_givcrypt_reqctx(req); |
61 | struct crypto_aead *geniv; | 61 | struct crypto_aead *geniv; |
62 | 62 | ||
63 | if (err == -EINPROGRESS) | 63 | if (err == -EINPROGRESS) |
64 | return; | 64 | return; |
65 | 65 | ||
66 | if (err) | 66 | if (err) |
67 | goto out; | 67 | goto out; |
68 | 68 | ||
69 | geniv = aead_givcrypt_reqtfm(req); | 69 | geniv = aead_givcrypt_reqtfm(req); |
70 | memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv)); | 70 | memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv)); |
71 | 71 | ||
72 | out: | 72 | out: |
73 | kfree(subreq->iv); | 73 | kfree(subreq->iv); |
74 | } | 74 | } |
75 | 75 | ||
76 | static void seqiv_aead_complete(struct crypto_async_request *base, int err) | 76 | static void seqiv_aead_complete(struct crypto_async_request *base, int err) |
77 | { | 77 | { |
78 | struct aead_givcrypt_request *req = base->data; | 78 | struct aead_givcrypt_request *req = base->data; |
79 | 79 | ||
80 | seqiv_aead_complete2(req, err); | 80 | seqiv_aead_complete2(req, err); |
81 | aead_givcrypt_complete(req, err); | 81 | aead_givcrypt_complete(req, err); |
82 | } | 82 | } |
83 | 83 | ||
84 | static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, | 84 | static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, |
85 | unsigned int ivsize) | 85 | unsigned int ivsize) |
86 | { | 86 | { |
87 | unsigned int len = ivsize; | 87 | unsigned int len = ivsize; |
88 | 88 | ||
89 | if (ivsize > sizeof(u64)) { | 89 | if (ivsize > sizeof(u64)) { |
90 | memset(info, 0, ivsize - sizeof(u64)); | 90 | memset(info, 0, ivsize - sizeof(u64)); |
91 | len = sizeof(u64); | 91 | len = sizeof(u64); |
92 | } | 92 | } |
93 | seq = cpu_to_be64(seq); | 93 | seq = cpu_to_be64(seq); |
94 | memcpy(info + ivsize - len, &seq, len); | 94 | memcpy(info + ivsize - len, &seq, len); |
95 | crypto_xor(info, ctx->salt, ivsize); | 95 | crypto_xor(info, ctx->salt, ivsize); |
96 | } | 96 | } |
97 | 97 | ||
98 | static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) | 98 | static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) |
99 | { | 99 | { |
100 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 100 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
101 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 101 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
102 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); | 102 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); |
103 | crypto_completion_t compl; | 103 | crypto_completion_t compl; |
104 | void *data; | 104 | void *data; |
105 | u8 *info; | 105 | u8 *info; |
106 | unsigned int ivsize; | 106 | unsigned int ivsize; |
107 | int err; | 107 | int err; |
108 | 108 | ||
109 | ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); | 109 | ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); |
110 | 110 | ||
111 | compl = req->creq.base.complete; | 111 | compl = req->creq.base.complete; |
112 | data = req->creq.base.data; | 112 | data = req->creq.base.data; |
113 | info = req->creq.info; | 113 | info = req->creq.info; |
114 | 114 | ||
115 | ivsize = crypto_ablkcipher_ivsize(geniv); | 115 | ivsize = crypto_ablkcipher_ivsize(geniv); |
116 | 116 | ||
117 | if (unlikely(!IS_ALIGNED((unsigned long)info, | 117 | if (unlikely(!IS_ALIGNED((unsigned long)info, |
118 | crypto_ablkcipher_alignmask(geniv) + 1))) { | 118 | crypto_ablkcipher_alignmask(geniv) + 1))) { |
119 | info = kmalloc(ivsize, req->creq.base.flags & | 119 | info = kmalloc(ivsize, req->creq.base.flags & |
120 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: | 120 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: |
121 | GFP_ATOMIC); | 121 | GFP_ATOMIC); |
122 | if (!info) | 122 | if (!info) |
123 | return -ENOMEM; | 123 | return -ENOMEM; |
124 | 124 | ||
125 | compl = seqiv_complete; | 125 | compl = seqiv_complete; |
126 | data = req; | 126 | data = req; |
127 | } | 127 | } |
128 | 128 | ||
129 | ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl, | 129 | ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl, |
130 | data); | 130 | data); |
131 | ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, | 131 | ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, |
132 | req->creq.nbytes, info); | 132 | req->creq.nbytes, info); |
133 | 133 | ||
134 | seqiv_geniv(ctx, info, req->seq, ivsize); | 134 | seqiv_geniv(ctx, info, req->seq, ivsize); |
135 | memcpy(req->giv, info, ivsize); | 135 | memcpy(req->giv, info, ivsize); |
136 | 136 | ||
137 | err = crypto_ablkcipher_encrypt(subreq); | 137 | err = crypto_ablkcipher_encrypt(subreq); |
138 | if (unlikely(info != req->creq.info)) | 138 | if (unlikely(info != req->creq.info)) |
139 | seqiv_complete2(req, err); | 139 | seqiv_complete2(req, err); |
140 | return err; | 140 | return err; |
141 | } | 141 | } |
142 | 142 | ||
143 | static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) | 143 | static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) |
144 | { | 144 | { |
145 | struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); | 145 | struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); |
146 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); | 146 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); |
147 | struct aead_request *areq = &req->areq; | 147 | struct aead_request *areq = &req->areq; |
148 | struct aead_request *subreq = aead_givcrypt_reqctx(req); | 148 | struct aead_request *subreq = aead_givcrypt_reqctx(req); |
149 | crypto_completion_t compl; | 149 | crypto_completion_t compl; |
150 | void *data; | 150 | void *data; |
151 | u8 *info; | 151 | u8 *info; |
152 | unsigned int ivsize; | 152 | unsigned int ivsize; |
153 | int err; | 153 | int err; |
154 | 154 | ||
155 | aead_request_set_tfm(subreq, aead_geniv_base(geniv)); | 155 | aead_request_set_tfm(subreq, aead_geniv_base(geniv)); |
156 | 156 | ||
157 | compl = areq->base.complete; | 157 | compl = areq->base.complete; |
158 | data = areq->base.data; | 158 | data = areq->base.data; |
159 | info = areq->iv; | 159 | info = areq->iv; |
160 | 160 | ||
161 | ivsize = crypto_aead_ivsize(geniv); | 161 | ivsize = crypto_aead_ivsize(geniv); |
162 | 162 | ||
163 | if (unlikely(!IS_ALIGNED((unsigned long)info, | 163 | if (unlikely(!IS_ALIGNED((unsigned long)info, |
164 | crypto_aead_alignmask(geniv) + 1))) { | 164 | crypto_aead_alignmask(geniv) + 1))) { |
165 | info = kmalloc(ivsize, areq->base.flags & | 165 | info = kmalloc(ivsize, areq->base.flags & |
166 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: | 166 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: |
167 | GFP_ATOMIC); | 167 | GFP_ATOMIC); |
168 | if (!info) | 168 | if (!info) |
169 | return -ENOMEM; | 169 | return -ENOMEM; |
170 | 170 | ||
171 | compl = seqiv_aead_complete; | 171 | compl = seqiv_aead_complete; |
172 | data = req; | 172 | data = req; |
173 | } | 173 | } |
174 | 174 | ||
175 | aead_request_set_callback(subreq, areq->base.flags, compl, data); | 175 | aead_request_set_callback(subreq, areq->base.flags, compl, data); |
176 | aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen, | 176 | aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen, |
177 | info); | 177 | info); |
178 | aead_request_set_assoc(subreq, areq->assoc, areq->assoclen); | 178 | aead_request_set_assoc(subreq, areq->assoc, areq->assoclen); |
179 | 179 | ||
180 | seqiv_geniv(ctx, info, req->seq, ivsize); | 180 | seqiv_geniv(ctx, info, req->seq, ivsize); |
181 | memcpy(req->giv, info, ivsize); | 181 | memcpy(req->giv, info, ivsize); |
182 | 182 | ||
183 | err = crypto_aead_encrypt(subreq); | 183 | err = crypto_aead_encrypt(subreq); |
184 | if (unlikely(info != areq->iv)) | 184 | if (unlikely(info != areq->iv)) |
185 | seqiv_aead_complete2(req, err); | 185 | seqiv_aead_complete2(req, err); |
186 | return err; | 186 | return err; |
187 | } | 187 | } |
188 | 188 | ||
189 | static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req) | 189 | static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req) |
190 | { | 190 | { |
191 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 191 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); |
192 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 192 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
193 | int err = 0; | 193 | int err = 0; |
194 | 194 | ||
195 | spin_lock_bh(&ctx->lock); | 195 | spin_lock_bh(&ctx->lock); |
196 | if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first) | 196 | if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first) |
197 | goto unlock; | 197 | goto unlock; |
198 | 198 | ||
199 | crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt; | 199 | crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt; |
200 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | 200 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, |
201 | crypto_ablkcipher_ivsize(geniv)); | 201 | crypto_ablkcipher_ivsize(geniv)); |
202 | 202 | ||
203 | unlock: | 203 | unlock: |
204 | spin_unlock_bh(&ctx->lock); | 204 | spin_unlock_bh(&ctx->lock); |
205 | 205 | ||
206 | if (err) | 206 | if (err) |
207 | return err; | 207 | return err; |
208 | 208 | ||
209 | return seqiv_givencrypt(req); | 209 | return seqiv_givencrypt(req); |
210 | } | 210 | } |
211 | 211 | ||
212 | static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req) | 212 | static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req) |
213 | { | 213 | { |
214 | struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); | 214 | struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); |
215 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); | 215 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); |
216 | int err = 0; | 216 | int err = 0; |
217 | 217 | ||
218 | spin_lock_bh(&ctx->lock); | 218 | spin_lock_bh(&ctx->lock); |
219 | if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first) | 219 | if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first) |
220 | goto unlock; | 220 | goto unlock; |
221 | 221 | ||
222 | crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt; | 222 | crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt; |
223 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | 223 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, |
224 | crypto_aead_ivsize(geniv)); | 224 | crypto_aead_ivsize(geniv)); |
225 | 225 | ||
226 | unlock: | 226 | unlock: |
227 | spin_unlock_bh(&ctx->lock); | 227 | spin_unlock_bh(&ctx->lock); |
228 | 228 | ||
229 | if (err) | 229 | if (err) |
230 | return err; | 230 | return err; |
231 | 231 | ||
232 | return seqiv_aead_givencrypt(req); | 232 | return seqiv_aead_givencrypt(req); |
233 | } | 233 | } |
234 | 234 | ||
235 | static int seqiv_init(struct crypto_tfm *tfm) | 235 | static int seqiv_init(struct crypto_tfm *tfm) |
236 | { | 236 | { |
237 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); | 237 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); |
238 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 238 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
239 | 239 | ||
240 | spin_lock_init(&ctx->lock); | 240 | spin_lock_init(&ctx->lock); |
241 | 241 | ||
242 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); | 242 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); |
243 | 243 | ||
244 | return skcipher_geniv_init(tfm); | 244 | return skcipher_geniv_init(tfm); |
245 | } | 245 | } |
246 | 246 | ||
247 | static int seqiv_aead_init(struct crypto_tfm *tfm) | 247 | static int seqiv_aead_init(struct crypto_tfm *tfm) |
248 | { | 248 | { |
249 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | 249 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); |
250 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); | 250 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); |
251 | 251 | ||
252 | spin_lock_init(&ctx->lock); | 252 | spin_lock_init(&ctx->lock); |
253 | 253 | ||
254 | tfm->crt_aead.reqsize = sizeof(struct aead_request); | 254 | tfm->crt_aead.reqsize = sizeof(struct aead_request); |
255 | 255 | ||
256 | return aead_geniv_init(tfm); | 256 | return aead_geniv_init(tfm); |
257 | } | 257 | } |
258 | 258 | ||
259 | static struct crypto_template seqiv_tmpl; | 259 | static struct crypto_template seqiv_tmpl; |
260 | 260 | ||
261 | static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb) | 261 | static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb) |
262 | { | 262 | { |
263 | struct crypto_instance *inst; | 263 | struct crypto_instance *inst; |
264 | 264 | ||
265 | inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0); | 265 | inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0); |
266 | 266 | ||
267 | if (IS_ERR(inst)) | 267 | if (IS_ERR(inst)) |
268 | goto out; | 268 | goto out; |
269 | 269 | ||
270 | inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first; | 270 | inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first; |
271 | 271 | ||
272 | inst->alg.cra_init = seqiv_init; | 272 | inst->alg.cra_init = seqiv_init; |
273 | inst->alg.cra_exit = skcipher_geniv_exit; | 273 | inst->alg.cra_exit = skcipher_geniv_exit; |
274 | 274 | ||
275 | inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; | 275 | inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; |
276 | 276 | ||
277 | out: | 277 | out: |
278 | return inst; | 278 | return inst; |
279 | } | 279 | } |
280 | 280 | ||
281 | static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb) | 281 | static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb) |
282 | { | 282 | { |
283 | struct crypto_instance *inst; | 283 | struct crypto_instance *inst; |
284 | 284 | ||
285 | inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0); | 285 | inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0); |
286 | 286 | ||
287 | if (IS_ERR(inst)) | 287 | if (IS_ERR(inst)) |
288 | goto out; | 288 | goto out; |
289 | 289 | ||
290 | inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; | 290 | inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; |
291 | 291 | ||
292 | inst->alg.cra_init = seqiv_aead_init; | 292 | inst->alg.cra_init = seqiv_aead_init; |
293 | inst->alg.cra_exit = aead_geniv_exit; | 293 | inst->alg.cra_exit = aead_geniv_exit; |
294 | 294 | ||
295 | inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; | 295 | inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; |
296 | 296 | ||
297 | out: | 297 | out: |
298 | return inst; | 298 | return inst; |
299 | } | 299 | } |
300 | 300 | ||
301 | static struct crypto_instance *seqiv_alloc(struct rtattr **tb) | 301 | static struct crypto_instance *seqiv_alloc(struct rtattr **tb) |
302 | { | 302 | { |
303 | struct crypto_attr_type *algt; | 303 | struct crypto_attr_type *algt; |
304 | struct crypto_instance *inst; | 304 | struct crypto_instance *inst; |
305 | int err; | 305 | int err; |
306 | 306 | ||
307 | algt = crypto_get_attr_type(tb); | 307 | algt = crypto_get_attr_type(tb); |
308 | if (IS_ERR(algt)) | 308 | if (IS_ERR(algt)) |
309 | return ERR_CAST(algt); | 309 | return ERR_CAST(algt); |
310 | 310 | ||
311 | err = crypto_get_default_rng(); | 311 | err = crypto_get_default_rng(); |
312 | if (err) | 312 | if (err) |
313 | return ERR_PTR(err); | 313 | return ERR_PTR(err); |
314 | 314 | ||
315 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) | 315 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) |
316 | inst = seqiv_ablkcipher_alloc(tb); | 316 | inst = seqiv_ablkcipher_alloc(tb); |
317 | else | 317 | else |
318 | inst = seqiv_aead_alloc(tb); | 318 | inst = seqiv_aead_alloc(tb); |
319 | 319 | ||
320 | if (IS_ERR(inst)) | 320 | if (IS_ERR(inst)) |
321 | goto put_rng; | 321 | goto put_rng; |
322 | 322 | ||
323 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; | 323 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; |
324 | inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); | 324 | inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); |
325 | 325 | ||
326 | out: | 326 | out: |
327 | return inst; | 327 | return inst; |
328 | 328 | ||
329 | put_rng: | 329 | put_rng: |
330 | crypto_put_default_rng(); | 330 | crypto_put_default_rng(); |
331 | goto out; | 331 | goto out; |
332 | } | 332 | } |
333 | 333 | ||
334 | static void seqiv_free(struct crypto_instance *inst) | 334 | static void seqiv_free(struct crypto_instance *inst) |
335 | { | 335 | { |
336 | if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) | 336 | if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) |
337 | skcipher_geniv_free(inst); | 337 | skcipher_geniv_free(inst); |
338 | else | 338 | else |
339 | aead_geniv_free(inst); | 339 | aead_geniv_free(inst); |
340 | crypto_put_default_rng(); | 340 | crypto_put_default_rng(); |
341 | } | 341 | } |
342 | 342 | ||
343 | static struct crypto_template seqiv_tmpl = { | 343 | static struct crypto_template seqiv_tmpl = { |
344 | .name = "seqiv", | 344 | .name = "seqiv", |
345 | .alloc = seqiv_alloc, | 345 | .alloc = seqiv_alloc, |
346 | .free = seqiv_free, | 346 | .free = seqiv_free, |
347 | .module = THIS_MODULE, | 347 | .module = THIS_MODULE, |
348 | }; | 348 | }; |
349 | 349 | ||
350 | static int __init seqiv_module_init(void) | 350 | static int __init seqiv_module_init(void) |
351 | { | 351 | { |
352 | return crypto_register_template(&seqiv_tmpl); | 352 | return crypto_register_template(&seqiv_tmpl); |
353 | } | 353 | } |
354 | 354 | ||
355 | static void __exit seqiv_module_exit(void) | 355 | static void __exit seqiv_module_exit(void) |
356 | { | 356 | { |
357 | crypto_unregister_template(&seqiv_tmpl); | 357 | crypto_unregister_template(&seqiv_tmpl); |
358 | } | 358 | } |
359 | 359 | ||
360 | module_init(seqiv_module_init); | 360 | module_init(seqiv_module_init); |
361 | module_exit(seqiv_module_exit); | 361 | module_exit(seqiv_module_exit); |
362 | 362 | ||
363 | MODULE_LICENSE("GPL"); | 363 | MODULE_LICENSE("GPL"); |
364 | MODULE_DESCRIPTION("Sequence Number IV Generator"); | 364 | MODULE_DESCRIPTION("Sequence Number IV Generator"); |
365 | MODULE_ALIAS_CRYPTO("seqiv"); | ||
365 | 366 |
crypto/vmac.c
1 | /* | 1 | /* |
2 | * Modified to interface to the Linux kernel | 2 | * Modified to interface to the Linux kernel |
3 | * Copyright (c) 2009, Intel Corporation. | 3 | * Copyright (c) 2009, Intel Corporation. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms and conditions of the GNU General Public License, | 6 | * under the terms and conditions of the GNU General Public License, |
7 | * version 2, as published by the Free Software Foundation. | 7 | * version 2, as published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | 9 | * This program is distributed in the hope it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License along with | 14 | * You should have received a copy of the GNU General Public License along with |
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 16 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* -------------------------------------------------------------------------- | 19 | /* -------------------------------------------------------------------------- |
20 | * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. | 20 | * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. |
21 | * This implementation is herby placed in the public domain. | 21 | * This implementation is herby placed in the public domain. |
22 | * The authors offers no warranty. Use at your own risk. | 22 | * The authors offers no warranty. Use at your own risk. |
23 | * Please send bug reports to the authors. | 23 | * Please send bug reports to the authors. |
24 | * Last modified: 17 APR 08, 1700 PDT | 24 | * Last modified: 17 APR 08, 1700 PDT |
25 | * ----------------------------------------------------------------------- */ | 25 | * ----------------------------------------------------------------------- */ |
26 | 26 | ||
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/crypto.h> | 29 | #include <linux/crypto.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
32 | #include <asm/byteorder.h> | 32 | #include <asm/byteorder.h> |
33 | #include <crypto/scatterwalk.h> | 33 | #include <crypto/scatterwalk.h> |
34 | #include <crypto/vmac.h> | 34 | #include <crypto/vmac.h> |
35 | #include <crypto/internal/hash.h> | 35 | #include <crypto/internal/hash.h> |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Constants and masks | 38 | * Constants and masks |
39 | */ | 39 | */ |
40 | #define UINT64_C(x) x##ULL | 40 | #define UINT64_C(x) x##ULL |
41 | static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ | 41 | static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ |
42 | static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ | 42 | static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ |
43 | static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ | 43 | static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ |
44 | static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ | 44 | static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ |
45 | static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | 45 | static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ |
46 | 46 | ||
#define pe64_to_cpup le64_to_cpup	/* Prefer little endian */

/*
 * Index of the high/low 32-bit half of a u64 when the u64 is viewed
 * as an array of two u32s (used by the 32-bit poly_step_func below).
 */
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

/* (rh,rl) += (ih,il), propagating the carry out of the low word. */
#define ADD128(rh, rl, ih, il)						\
	do {								\
		u64 _il = (il);						\
		(rl) += (_il);						\
		if ((rl) < (_il))					\
			(rh)++;						\
		(rh) += (ih);						\
	} while (0)

/* Full 32x32->64 product of the low halves of i1 and i2. */
#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

/* (rh,rl) = i1 * i2, valid only when the cross-product sum m cannot
 * overflow (callers guarantee the inputs' top bits are clear). */
#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m >> 32), (m << 32));			\
	} while (0)

/* (rh,rl) = i1 * i2 for arbitrary u64 inputs; the two cross products
 * are accumulated separately so no intermediate sum can overflow. */
#define MUL64(rh, rl, i1, i2)						\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m1 = MUL32(_i1, _i2>>32);				\
		u64 m2 = MUL32(_i1>>32, _i2);				\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
	} while (0)
96 | 96 | ||
97 | /* | 97 | /* |
98 | * For highest performance the L1 NH and L2 polynomial hashes should be | 98 | * For highest performance the L1 NH and L2 polynomial hashes should be |
99 | * carefully implemented to take advantage of one's target architecture. | 99 | * carefully implemented to take advantage of one's target architecture. |
100 | * Here these two hash functions are defined multiple time; once for | 100 | * Here these two hash functions are defined multiple time; once for |
101 | * 64-bit architectures, once for 32-bit SSE2 architectures, and once | 101 | * 64-bit architectures, once for 32-bit SSE2 architectures, and once |
102 | * for the rest (32-bit) architectures. | 102 | * for the rest (32-bit) architectures. |
103 | * For each, nh_16 *must* be defined (works on multiples of 16 bytes). | 103 | * For each, nh_16 *must* be defined (works on multiples of 16 bytes). |
104 | * Optionally, nh_vmac_nhbytes can be defined (for multiples of | 104 | * Optionally, nh_vmac_nhbytes can be defined (for multiples of |
105 | * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two | 105 | * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two |
106 | * NH computations at once). | 106 | * NH computations at once). |
107 | */ | 107 | */ |
108 | 108 | ||
109 | #ifdef CONFIG_64BIT | 109 | #ifdef CONFIG_64BIT |
110 | 110 | ||
/* NH inner hash over nw 64-bit message words (64-bit scalar version):
 * (rh,rl) = sum of MUL64 over word pairs offset by the key. */
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

/* Two interleaved NH computations sharing the message loads; the second
 * result (rh1,rl1) uses the key shifted forward by two words. */
#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
/* nh_16 unrolled to consume eight message words per loop iteration. */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

/* nh_16_2 unrolled: two interleaved NH results per 8-word iteration. */
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)			\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)
#endif

/* One step of the 127-bit polynomial hash: (ah,al) = (ah,al)*(kh,kl)
 * + (mh,ml), reduced modulo 2^127 - 1 (64-bit version). */
#define poly_step(ah, al, kh, kl, mh, ml)				\
	do {								\
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
		/* compute ab*cd, put bd into result registers */	\
		PMUL64(t3h, t3l, al, kh);				\
		PMUL64(t2h, t2l, ah, kl);				\
		PMUL64(t1h, t1l, ah, 2*kh);				\
		PMUL64(ah, al, al, kl);					\
		/* add 2 * ac to result */				\
		ADD128(ah, al, t1h, t1l);				\
		/* add together ad + bc */				\
		ADD128(t2h, t2l, t3h, t3l);				\
		/* now (ah,al), (t2l,2*t2h) need summing */		\
		/* first add the high registers, carrying into t2h */	\
		ADD128(t2h, ah, z, t2l);				\
		/* double t2h and add top bit of ah */			\
		t2h = 2 * t2h + (ah >> 63);				\
		ah &= m63;						\
		/* now add the low registers */				\
		ADD128(ah, al, mh, ml);					\
		ADD128(ah, al, z, t2h);					\
	} while (0)
212 | 212 | ||
213 | #else /* ! CONFIG_64BIT */ | 213 | #else /* ! CONFIG_64BIT */ |
214 | 214 | ||
#ifndef nh_16
/* Portable 32-bit fallback for the NH inner hash: splits each 64x64
 * product into 32-bit partial products; t accumulates the low cross
 * terms and is folded in once after the loop. */
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		u64 t1, t2, m1, m2, t;					\
		int i;							\
		rh = rl = t = 0;					\
		for (i = 0; i < nw; i += 2)  {				\
			t1 = pe64_to_cpup(mp+i) + kp[i];		\
			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
			m2 = MUL32(t1 >> 32, t2);			\
			m1 = MUL32(t1, t2 >> 32);			\
			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
				MUL32(t1, t2));				\
			rh += (u64)(u32)(m1 >> 32)			\
				+ (u32)(m2 >> 32);			\
			t += (u64)(u32)m1 + (u32)m2;			\
		}							\
		ADD128(rh, rl, (t >> 32), (t << 32));			\
	} while (0)
#endif
235 | 235 | ||
/*
 * 32-bit implementation of one polynomial hash step:
 * (*ahi,*alo) = (*ahi,*alo) * (*kh,*kl) + (*mh,*ml) mod (2^127 - 1),
 * computed with 32x32->64 partial products. The a0..k3 macros view
 * each u64 operand as its two u32 halves (INDEX_LOW/INDEX_HIGH).
 *
 * NOTE(review): reads *mh / *ml through u64/u32 casts -- presumably
 * callers always pass properly aligned u64s; confirm via poly_step().
 */
static void poly_step_func(u64 *ahi, u64 *alo,
			const u64 *kh, const u64 *kl,
			const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	/* keep only 63 bits in the high half; the excess wraps around
	 * (modulo 2^127 - 1) into the low-order accumulation below */
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q =  MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q =  MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

/* 32-bit builds route poly_step through the function above. */
#define poly_step(ah, al, kh, kl, mh, ml)				\
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
298 | 298 | ||
299 | #endif /* end of specialized NH and poly definitions */ | 299 | #endif /* end of specialized NH and poly definitions */ |
300 | 300 | ||
/* At least nh_16 is defined. Defined others as needed here */
#ifndef nh_16_2
/* Generic two-way NH: just run nh_16 twice, once with the key offset
 * by two words (no shared message loads). */
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
	do { 								\
		nh_16(mp, kp, nw, rh, rl);				\
		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
/* Generic full-block NH: the plain nh_16 already handles any even
 * word count, so no unrolled variant is required. */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)			\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
	} while (0)
#endif
320 | 320 | ||
321 | static void vhash_abort(struct vmac_ctx *ctx) | 321 | static void vhash_abort(struct vmac_ctx *ctx) |
322 | { | 322 | { |
323 | ctx->polytmp[0] = ctx->polykey[0] ; | 323 | ctx->polytmp[0] = ctx->polykey[0] ; |
324 | ctx->polytmp[1] = ctx->polykey[1] ; | 324 | ctx->polytmp[1] = ctx->polykey[1] ; |
325 | ctx->first_block_processed = 0; | 325 | ctx->first_block_processed = 0; |
326 | } | 326 | } |
327 | 327 | ||
/*
 * L3 output stage: compress the 128-bit polynomial accumulator (p1,p2)
 * into 64 bits under key (k1,k2), folding in the message bit length.
 * Reductions use the file-scope constants m63/m64/p64 (masks and the
 * prime 2^64 - 257); all conditional corrections are branchless.
 */
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;	/* add 2^64 mod p64 on carry-out */
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64-1)) & 257;	/* final correction into [0, p64) */
	return rl;
}
366 | 366 | ||
/*
 * Absorb full blocks of message into the running VHASH state.
 * @mbytes must be 0 (no-op) or a multiple of VMAC_NHBYTES; partial
 * blocks are buffered by the caller (see vmac_update()).
 *
 * NOTE(review): the (u64 *) cast assumes @m is suitably aligned for
 * u64 loads via pe64_to_cpup -- confirm for all callers.
 */
static void vhash_update(const unsigned char *m,
			unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
			struct vmac_ctx *ctx)
{
	u64 rh, rl, *mptr;
	const u64 *kptr = (u64 *)ctx->nhkey;
	int i;
	u64 ch, cl;
	u64 pkh = ctx->polykey[0];
	u64 pkl = ctx->polykey[1];

	if (!mbytes)
		return;

	BUG_ON(mbytes % VMAC_NHBYTES);

	mptr = (u64 *)m;
	i = mbytes / VMAC_NHBYTES;	/* Must be non-zero */

	ch = ctx->polytmp[0];
	cl = ctx->polytmp[1];

	if (!ctx->first_block_processed) {
		/* The very first NH block is added into the accumulator
		 * directly instead of going through poly_step. */
		ctx->first_block_processed = 1;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		i--;
	}

	while (i--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}

	/* Persist the accumulator for the next update/final call. */
	ctx->polytmp[0] = ch;
	ctx->polytmp[1] = cl;
}
408 | 408 | ||
/*
 * Finish the VHASH computation over the remaining message @m of
 * @mbytes bytes (any length) and return the 64-bit L3 result.
 * Resets the running state via vhash_abort() before returning.
 * @tagl is unused in this implementation.
 */
static u64 vhash(unsigned char m[], unsigned int mbytes,
			u64 *tagl, struct vmac_ctx *ctx)
{
	u64 rh, rl, *mptr;
	const u64 *kptr = (u64 *)ctx->nhkey;
	int i, remaining;
	u64 ch, cl;
	u64 pkh = ctx->polykey[0];
	u64 pkl = ctx->polykey[1];

	mptr = (u64 *)m;
	i = mbytes / VMAC_NHBYTES;
	remaining = mbytes % VMAC_NHBYTES;

	if (ctx->first_block_processed) {
		/* Resume from state accumulated by vhash_update(). */
		ch = ctx->polytmp[0];
		cl = ctx->polytmp[1];
	} else if (i) {
		/* First full block folds in directly (no poly_step). */
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
		ch &= m62;
		ADD128(ch, cl, pkh, pkl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		i--;
	} else if (remaining) {
		/* Message shorter than one block: hash the tail padded
		 * up to a 16-byte multiple and go straight to L3.
		 * NOTE(review): mptr is advanced by a full block here
		 * but is not used again before do_l3 -- harmless. */
		nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
		ch &= m62;
		ADD128(ch, cl, pkh, pkl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		goto do_l3;
	} else {/* Empty String */
		ch = pkh; cl = pkl;
		goto do_l3;
	}

	while (i--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}
	if (remaining) {
		nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
	}

do_l3:
	vhash_abort(ctx);
	remaining *= 8;		/* L3 folds in the length in bits */
	return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
}
460 | 460 | ||
/*
 * Compute the 64-bit VMAC tag of @m under nonce @n: the block cipher
 * encrypts the (even-masked) nonce and one 64-bit half of the result,
 * selected by the nonce's low bit, is added to the VHASH output.
 * The encrypted nonce is cached in the tfm context so two consecutive
 * calls with paired nonces reuse one cipher invocation.
 *
 * NOTE(review): the nonce cache lives in per-tfm state, so concurrent
 * users of one tfm would race on it -- confirm callers serialize.
 * NOTE(review): le64_to_cpu() applied to the *sum* p + h looks
 * endianness-suspect on big-endian machines; verify against the VMAC
 * specification before relying on cross-endian interoperability.
 */
static u64 vmac(unsigned char m[], unsigned int mbytes,
			const unsigned char n[16], u64 *tagl,
			struct vmac_ctx_t *ctx)
{
	u64 *in_n, *out_p;
	u64 p, h;
	int i;

	in_n = ctx->__vmac_ctx.cached_nonce;
	out_p = ctx->__vmac_ctx.cached_aes;

	i = n[15] & 1;	/* selects which half of the keystream to use */
	if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
		/* Cache miss: encrypt the nonce with its low bit cleared. */
		in_n[0] = *(u64 *)(n);
		in_n[1] = *(u64 *)(n+8);
		((unsigned char *)in_n)[15] &= 0xFE;
		crypto_cipher_encrypt_one(ctx->child,
			(unsigned char *)out_p, (unsigned char *)in_n);

		/* Mark the cache so the paired nonce (other low bit)
		 * still hits on the next call. */
		((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
	}
	p = be64_to_cpup(out_p + i);
	h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
	return le64_to_cpu(p + h);
}
486 | 486 | ||
/*
 * Derive all VMAC subkeys from @user_key: program the child block
 * cipher, then generate the NH, polynomial, and L3 keys by encrypting
 * an incrementing counter block whose first byte tags the key class
 * (0x80 NH, 0xC0 poly, 0xE0 L3). Poly keys are masked by mpoly; L3
 * key words are rejection-sampled until both are below the prime p64.
 * Returns 0 on success or the child cipher's setkey error.
 */
static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
{
	u64 in[2] = {0}, out[2];
	unsigned i;
	int err = 0;

	err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
	if (err)
		return err;

	/* Fill nh key */
	((unsigned char *)in)[0] = 0x80;
	for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
		crypto_cipher_encrypt_one(ctx->child,
			(unsigned char *)out, (unsigned char *)in);
		ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
		ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
		((unsigned char *)in)[15] += 1;	/* next counter block */
	}

	/* Fill poly key */
	((unsigned char *)in)[0] = 0xC0;
	in[1] = 0;	/* restart the counter for this key class */
	for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
		crypto_cipher_encrypt_one(ctx->child,
			(unsigned char *)out, (unsigned char *)in);
		ctx->__vmac_ctx.polytmp[i] =
			ctx->__vmac_ctx.polykey[i] =
				be64_to_cpup(out) & mpoly;
		ctx->__vmac_ctx.polytmp[i+1] =
			ctx->__vmac_ctx.polykey[i+1] =
				be64_to_cpup(out+1) & mpoly;
		((unsigned char *)in)[15] += 1;
	}

	/* Fill ip key */
	((unsigned char *)in)[0] = 0xE0;
	in[1] = 0;
	for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
		do {
			crypto_cipher_encrypt_one(ctx->child,
				(unsigned char *)out, (unsigned char *)in);
			ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
			ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
			((unsigned char *)in)[15] += 1;
		} while (ctx->__vmac_ctx.l3key[i] >= p64
			|| ctx->__vmac_ctx.l3key[i+1] >= p64);
	}

	/* Invalidate nonce/aes cache and reset other elements */
	ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
	ctx->__vmac_ctx.cached_nonce[1] = (u64)0;  /* Ensure illegal nonce */
	ctx->__vmac_ctx.first_block_processed = 0;

	return err;
}
543 | 543 | ||
544 | static int vmac_setkey(struct crypto_shash *parent, | 544 | static int vmac_setkey(struct crypto_shash *parent, |
545 | const u8 *key, unsigned int keylen) | 545 | const u8 *key, unsigned int keylen) |
546 | { | 546 | { |
547 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | 547 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); |
548 | 548 | ||
549 | if (keylen != VMAC_KEY_LEN) { | 549 | if (keylen != VMAC_KEY_LEN) { |
550 | crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); | 550 | crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); |
551 | return -EINVAL; | 551 | return -EINVAL; |
552 | } | 552 | } |
553 | 553 | ||
554 | return vmac_set_key((u8 *)key, ctx); | 554 | return vmac_set_key((u8 *)key, ctx); |
555 | } | 555 | } |
556 | 556 | ||
/* shash init: nothing to do -- all hashing state lives in the tfm
 * context (reset by setkey and by vhash_abort after each final). */
static int vmac_init(struct shash_desc *pdesc)
{
	return 0;
}
561 | 561 | ||
/*
 * shash update: buffer input into ctx->partial until a full
 * VMAC_NHBYTES block is available, feed whole blocks to
 * vhash_update(), and stash any trailing partial block for later.
 * Always returns 0.
 */
static int vmac_update(struct shash_desc *pdesc, const u8 *p,
		unsigned int len)
{
	struct crypto_shash *parent = pdesc->tfm;
	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
	int expand;
	int min;

	/* Bytes still needed to complete the buffered partial block. */
	expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
			VMAC_NHBYTES - ctx->partial_size : 0;

	min = len < expand ? len : expand;

	memcpy(ctx->partial + ctx->partial_size, p, min);
	ctx->partial_size += min;

	if (len < expand)
		return 0;	/* still not a full block; keep buffering */

	/* Partial buffer is now a full block: consume it. */
	vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
	ctx->partial_size = 0;

	len -= expand;
	p += expand;

	if (len % VMAC_NHBYTES) {
		/* Save the new trailing partial block first ... */
		memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
			len % VMAC_NHBYTES);
		ctx->partial_size = len % VMAC_NHBYTES;
	}

	/* ... then hash all remaining whole blocks in place. */
	vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);

	return 0;
}
597 | 597 | ||
598 | static int vmac_final(struct shash_desc *pdesc, u8 *out) | 598 | static int vmac_final(struct shash_desc *pdesc, u8 *out) |
599 | { | 599 | { |
600 | struct crypto_shash *parent = pdesc->tfm; | 600 | struct crypto_shash *parent = pdesc->tfm; |
601 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | 601 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); |
602 | vmac_t mac; | 602 | vmac_t mac; |
603 | u8 nonce[16] = {}; | 603 | u8 nonce[16] = {}; |
604 | 604 | ||
605 | /* vmac() ends up accessing outside the array bounds that | 605 | /* vmac() ends up accessing outside the array bounds that |
606 | * we specify. It appears to access up to the next 2-word | 606 | * we specify. It appears to access up to the next 2-word |
607 | * boundary. We'll just be uber cautious and zero the | 607 | * boundary. We'll just be uber cautious and zero the |
608 | * unwritten bytes in the buffer. | 608 | * unwritten bytes in the buffer. |
609 | */ | 609 | */ |
610 | if (ctx->partial_size) { | 610 | if (ctx->partial_size) { |
611 | memset(ctx->partial + ctx->partial_size, 0, | 611 | memset(ctx->partial + ctx->partial_size, 0, |
612 | VMAC_NHBYTES - ctx->partial_size); | 612 | VMAC_NHBYTES - ctx->partial_size); |
613 | } | 613 | } |
614 | mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); | 614 | mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); |
615 | memcpy(out, &mac, sizeof(vmac_t)); | 615 | memcpy(out, &mac, sizeof(vmac_t)); |
616 | memzero_explicit(&mac, sizeof(vmac_t)); | 616 | memzero_explicit(&mac, sizeof(vmac_t)); |
617 | memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); | 617 | memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); |
618 | ctx->partial_size = 0; | 618 | ctx->partial_size = 0; |
619 | return 0; | 619 | return 0; |
620 | } | 620 | } |
621 | 621 | ||
622 | static int vmac_init_tfm(struct crypto_tfm *tfm) | 622 | static int vmac_init_tfm(struct crypto_tfm *tfm) |
623 | { | 623 | { |
624 | struct crypto_cipher *cipher; | 624 | struct crypto_cipher *cipher; |
625 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 625 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
626 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 626 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
627 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); | 627 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); |
628 | 628 | ||
629 | cipher = crypto_spawn_cipher(spawn); | 629 | cipher = crypto_spawn_cipher(spawn); |
630 | if (IS_ERR(cipher)) | 630 | if (IS_ERR(cipher)) |
631 | return PTR_ERR(cipher); | 631 | return PTR_ERR(cipher); |
632 | 632 | ||
633 | ctx->child = cipher; | 633 | ctx->child = cipher; |
634 | return 0; | 634 | return 0; |
635 | } | 635 | } |
636 | 636 | ||
637 | static void vmac_exit_tfm(struct crypto_tfm *tfm) | 637 | static void vmac_exit_tfm(struct crypto_tfm *tfm) |
638 | { | 638 | { |
639 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); | 639 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); |
640 | crypto_free_cipher(ctx->child); | 640 | crypto_free_cipher(ctx->child); |
641 | } | 641 | } |
642 | 642 | ||
643 | static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) | 643 | static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) |
644 | { | 644 | { |
645 | struct shash_instance *inst; | 645 | struct shash_instance *inst; |
646 | struct crypto_alg *alg; | 646 | struct crypto_alg *alg; |
647 | int err; | 647 | int err; |
648 | 648 | ||
649 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); | 649 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
650 | if (err) | 650 | if (err) |
651 | return err; | 651 | return err; |
652 | 652 | ||
653 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 653 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
654 | CRYPTO_ALG_TYPE_MASK); | 654 | CRYPTO_ALG_TYPE_MASK); |
655 | if (IS_ERR(alg)) | 655 | if (IS_ERR(alg)) |
656 | return PTR_ERR(alg); | 656 | return PTR_ERR(alg); |
657 | 657 | ||
658 | inst = shash_alloc_instance("vmac", alg); | 658 | inst = shash_alloc_instance("vmac", alg); |
659 | err = PTR_ERR(inst); | 659 | err = PTR_ERR(inst); |
660 | if (IS_ERR(inst)) | 660 | if (IS_ERR(inst)) |
661 | goto out_put_alg; | 661 | goto out_put_alg; |
662 | 662 | ||
663 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, | 663 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, |
664 | shash_crypto_instance(inst), | 664 | shash_crypto_instance(inst), |
665 | CRYPTO_ALG_TYPE_MASK); | 665 | CRYPTO_ALG_TYPE_MASK); |
666 | if (err) | 666 | if (err) |
667 | goto out_free_inst; | 667 | goto out_free_inst; |
668 | 668 | ||
669 | inst->alg.base.cra_priority = alg->cra_priority; | 669 | inst->alg.base.cra_priority = alg->cra_priority; |
670 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | 670 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
671 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | 671 | inst->alg.base.cra_alignmask = alg->cra_alignmask; |
672 | 672 | ||
673 | inst->alg.digestsize = sizeof(vmac_t); | 673 | inst->alg.digestsize = sizeof(vmac_t); |
674 | inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); | 674 | inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); |
675 | inst->alg.base.cra_init = vmac_init_tfm; | 675 | inst->alg.base.cra_init = vmac_init_tfm; |
676 | inst->alg.base.cra_exit = vmac_exit_tfm; | 676 | inst->alg.base.cra_exit = vmac_exit_tfm; |
677 | 677 | ||
678 | inst->alg.init = vmac_init; | 678 | inst->alg.init = vmac_init; |
679 | inst->alg.update = vmac_update; | 679 | inst->alg.update = vmac_update; |
680 | inst->alg.final = vmac_final; | 680 | inst->alg.final = vmac_final; |
681 | inst->alg.setkey = vmac_setkey; | 681 | inst->alg.setkey = vmac_setkey; |
682 | 682 | ||
683 | err = shash_register_instance(tmpl, inst); | 683 | err = shash_register_instance(tmpl, inst); |
684 | if (err) { | 684 | if (err) { |
685 | out_free_inst: | 685 | out_free_inst: |
686 | shash_free_instance(shash_crypto_instance(inst)); | 686 | shash_free_instance(shash_crypto_instance(inst)); |
687 | } | 687 | } |
688 | 688 | ||
689 | out_put_alg: | 689 | out_put_alg: |
690 | crypto_mod_put(alg); | 690 | crypto_mod_put(alg); |
691 | return err; | 691 | return err; |
692 | } | 692 | } |
693 | 693 | ||
694 | static struct crypto_template vmac_tmpl = { | 694 | static struct crypto_template vmac_tmpl = { |
695 | .name = "vmac", | 695 | .name = "vmac", |
696 | .create = vmac_create, | 696 | .create = vmac_create, |
697 | .free = shash_free_instance, | 697 | .free = shash_free_instance, |
698 | .module = THIS_MODULE, | 698 | .module = THIS_MODULE, |
699 | }; | 699 | }; |
700 | 700 | ||
701 | static int __init vmac_module_init(void) | 701 | static int __init vmac_module_init(void) |
702 | { | 702 | { |
703 | return crypto_register_template(&vmac_tmpl); | 703 | return crypto_register_template(&vmac_tmpl); |
704 | } | 704 | } |
705 | 705 | ||
706 | static void __exit vmac_module_exit(void) | 706 | static void __exit vmac_module_exit(void) |
707 | { | 707 | { |
708 | crypto_unregister_template(&vmac_tmpl); | 708 | crypto_unregister_template(&vmac_tmpl); |
709 | } | 709 | } |
710 | 710 | ||
711 | module_init(vmac_module_init); | 711 | module_init(vmac_module_init); |
712 | module_exit(vmac_module_exit); | 712 | module_exit(vmac_module_exit); |
713 | 713 | ||
714 | MODULE_LICENSE("GPL"); | 714 | MODULE_LICENSE("GPL"); |
715 | MODULE_DESCRIPTION("VMAC hash algorithm"); | 715 | MODULE_DESCRIPTION("VMAC hash algorithm"); |
716 | MODULE_ALIAS_CRYPTO("vmac"); | ||
716 | 717 |
crypto/xcbc.c
1 | /* | 1 | /* |
2 | * Copyright (C)2006 USAGI/WIDE Project | 2 | * Copyright (C)2006 USAGI/WIDE Project |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or | 6 | * the Free Software Foundation; either version 2 of the License, or |
7 | * (at your option) any later version. | 7 | * (at your option) any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | * | 17 | * |
18 | * Author: | 18 | * Author: |
19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> | 19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <crypto/internal/hash.h> | 22 | #include <crypto/internal/hash.h> |
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | 26 | ||
27 | static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, | 27 | static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, |
28 | 0x02020202, 0x02020202, 0x02020202, 0x02020202, | 28 | 0x02020202, 0x02020202, 0x02020202, 0x02020202, |
29 | 0x03030303, 0x03030303, 0x03030303, 0x03030303}; | 29 | 0x03030303, 0x03030303, 0x03030303, 0x03030303}; |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * +------------------------ | 32 | * +------------------------ |
33 | * | <parent tfm> | 33 | * | <parent tfm> |
34 | * +------------------------ | 34 | * +------------------------ |
35 | * | xcbc_tfm_ctx | 35 | * | xcbc_tfm_ctx |
36 | * +------------------------ | 36 | * +------------------------ |
37 | * | consts (block size * 2) | 37 | * | consts (block size * 2) |
38 | * +------------------------ | 38 | * +------------------------ |
39 | */ | 39 | */ |
40 | struct xcbc_tfm_ctx { | 40 | struct xcbc_tfm_ctx { |
41 | struct crypto_cipher *child; | 41 | struct crypto_cipher *child; |
42 | u8 ctx[]; | 42 | u8 ctx[]; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * +------------------------ | 46 | * +------------------------ |
47 | * | <shash desc> | 47 | * | <shash desc> |
48 | * +------------------------ | 48 | * +------------------------ |
49 | * | xcbc_desc_ctx | 49 | * | xcbc_desc_ctx |
50 | * +------------------------ | 50 | * +------------------------ |
51 | * | odds (block size) | 51 | * | odds (block size) |
52 | * +------------------------ | 52 | * +------------------------ |
53 | * | prev (block size) | 53 | * | prev (block size) |
54 | * +------------------------ | 54 | * +------------------------ |
55 | */ | 55 | */ |
56 | struct xcbc_desc_ctx { | 56 | struct xcbc_desc_ctx { |
57 | unsigned int len; | 57 | unsigned int len; |
58 | u8 ctx[]; | 58 | u8 ctx[]; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, | 61 | static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, |
62 | const u8 *inkey, unsigned int keylen) | 62 | const u8 *inkey, unsigned int keylen) |
63 | { | 63 | { |
64 | unsigned long alignmask = crypto_shash_alignmask(parent); | 64 | unsigned long alignmask = crypto_shash_alignmask(parent); |
65 | struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); | 65 | struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); |
66 | int bs = crypto_shash_blocksize(parent); | 66 | int bs = crypto_shash_blocksize(parent); |
67 | u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | 67 | u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); |
68 | int err = 0; | 68 | int err = 0; |
69 | u8 key1[bs]; | 69 | u8 key1[bs]; |
70 | 70 | ||
71 | if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) | 71 | if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) |
72 | return err; | 72 | return err; |
73 | 73 | ||
74 | crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs); | 74 | crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs); |
75 | crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); | 75 | crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); |
76 | crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks); | 76 | crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks); |
77 | 77 | ||
78 | return crypto_cipher_setkey(ctx->child, key1, bs); | 78 | return crypto_cipher_setkey(ctx->child, key1, bs); |
79 | 79 | ||
80 | } | 80 | } |
81 | 81 | ||
82 | static int crypto_xcbc_digest_init(struct shash_desc *pdesc) | 82 | static int crypto_xcbc_digest_init(struct shash_desc *pdesc) |
83 | { | 83 | { |
84 | unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); | 84 | unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); |
85 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); | 85 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
86 | int bs = crypto_shash_blocksize(pdesc->tfm); | 86 | int bs = crypto_shash_blocksize(pdesc->tfm); |
87 | u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; | 87 | u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; |
88 | 88 | ||
89 | ctx->len = 0; | 89 | ctx->len = 0; |
90 | memset(prev, 0, bs); | 90 | memset(prev, 0, bs); |
91 | 91 | ||
92 | return 0; | 92 | return 0; |
93 | } | 93 | } |
94 | 94 | ||
95 | static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, | 95 | static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, |
96 | unsigned int len) | 96 | unsigned int len) |
97 | { | 97 | { |
98 | struct crypto_shash *parent = pdesc->tfm; | 98 | struct crypto_shash *parent = pdesc->tfm; |
99 | unsigned long alignmask = crypto_shash_alignmask(parent); | 99 | unsigned long alignmask = crypto_shash_alignmask(parent); |
100 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); | 100 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); |
101 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); | 101 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
102 | struct crypto_cipher *tfm = tctx->child; | 102 | struct crypto_cipher *tfm = tctx->child; |
103 | int bs = crypto_shash_blocksize(parent); | 103 | int bs = crypto_shash_blocksize(parent); |
104 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | 104 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); |
105 | u8 *prev = odds + bs; | 105 | u8 *prev = odds + bs; |
106 | 106 | ||
107 | /* checking the data can fill the block */ | 107 | /* checking the data can fill the block */ |
108 | if ((ctx->len + len) <= bs) { | 108 | if ((ctx->len + len) <= bs) { |
109 | memcpy(odds + ctx->len, p, len); | 109 | memcpy(odds + ctx->len, p, len); |
110 | ctx->len += len; | 110 | ctx->len += len; |
111 | return 0; | 111 | return 0; |
112 | } | 112 | } |
113 | 113 | ||
114 | /* filling odds with new data and encrypting it */ | 114 | /* filling odds with new data and encrypting it */ |
115 | memcpy(odds + ctx->len, p, bs - ctx->len); | 115 | memcpy(odds + ctx->len, p, bs - ctx->len); |
116 | len -= bs - ctx->len; | 116 | len -= bs - ctx->len; |
117 | p += bs - ctx->len; | 117 | p += bs - ctx->len; |
118 | 118 | ||
119 | crypto_xor(prev, odds, bs); | 119 | crypto_xor(prev, odds, bs); |
120 | crypto_cipher_encrypt_one(tfm, prev, prev); | 120 | crypto_cipher_encrypt_one(tfm, prev, prev); |
121 | 121 | ||
122 | /* clearing the length */ | 122 | /* clearing the length */ |
123 | ctx->len = 0; | 123 | ctx->len = 0; |
124 | 124 | ||
125 | /* encrypting the rest of data */ | 125 | /* encrypting the rest of data */ |
126 | while (len > bs) { | 126 | while (len > bs) { |
127 | crypto_xor(prev, p, bs); | 127 | crypto_xor(prev, p, bs); |
128 | crypto_cipher_encrypt_one(tfm, prev, prev); | 128 | crypto_cipher_encrypt_one(tfm, prev, prev); |
129 | p += bs; | 129 | p += bs; |
130 | len -= bs; | 130 | len -= bs; |
131 | } | 131 | } |
132 | 132 | ||
133 | /* keeping the surplus of blocksize */ | 133 | /* keeping the surplus of blocksize */ |
134 | if (len) { | 134 | if (len) { |
135 | memcpy(odds, p, len); | 135 | memcpy(odds, p, len); |
136 | ctx->len = len; | 136 | ctx->len = len; |
137 | } | 137 | } |
138 | 138 | ||
139 | return 0; | 139 | return 0; |
140 | } | 140 | } |
141 | 141 | ||
142 | static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) | 142 | static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) |
143 | { | 143 | { |
144 | struct crypto_shash *parent = pdesc->tfm; | 144 | struct crypto_shash *parent = pdesc->tfm; |
145 | unsigned long alignmask = crypto_shash_alignmask(parent); | 145 | unsigned long alignmask = crypto_shash_alignmask(parent); |
146 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); | 146 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); |
147 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); | 147 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
148 | struct crypto_cipher *tfm = tctx->child; | 148 | struct crypto_cipher *tfm = tctx->child; |
149 | int bs = crypto_shash_blocksize(parent); | 149 | int bs = crypto_shash_blocksize(parent); |
150 | u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); | 150 | u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); |
151 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | 151 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); |
152 | u8 *prev = odds + bs; | 152 | u8 *prev = odds + bs; |
153 | unsigned int offset = 0; | 153 | unsigned int offset = 0; |
154 | 154 | ||
155 | if (ctx->len != bs) { | 155 | if (ctx->len != bs) { |
156 | unsigned int rlen; | 156 | unsigned int rlen; |
157 | u8 *p = odds + ctx->len; | 157 | u8 *p = odds + ctx->len; |
158 | 158 | ||
159 | *p = 0x80; | 159 | *p = 0x80; |
160 | p++; | 160 | p++; |
161 | 161 | ||
162 | rlen = bs - ctx->len -1; | 162 | rlen = bs - ctx->len -1; |
163 | if (rlen) | 163 | if (rlen) |
164 | memset(p, 0, rlen); | 164 | memset(p, 0, rlen); |
165 | 165 | ||
166 | offset += bs; | 166 | offset += bs; |
167 | } | 167 | } |
168 | 168 | ||
169 | crypto_xor(prev, odds, bs); | 169 | crypto_xor(prev, odds, bs); |
170 | crypto_xor(prev, consts + offset, bs); | 170 | crypto_xor(prev, consts + offset, bs); |
171 | 171 | ||
172 | crypto_cipher_encrypt_one(tfm, out, prev); | 172 | crypto_cipher_encrypt_one(tfm, out, prev); |
173 | 173 | ||
174 | return 0; | 174 | return 0; |
175 | } | 175 | } |
176 | 176 | ||
177 | static int xcbc_init_tfm(struct crypto_tfm *tfm) | 177 | static int xcbc_init_tfm(struct crypto_tfm *tfm) |
178 | { | 178 | { |
179 | struct crypto_cipher *cipher; | 179 | struct crypto_cipher *cipher; |
180 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 180 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
181 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 181 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
182 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); | 182 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
183 | 183 | ||
184 | cipher = crypto_spawn_cipher(spawn); | 184 | cipher = crypto_spawn_cipher(spawn); |
185 | if (IS_ERR(cipher)) | 185 | if (IS_ERR(cipher)) |
186 | return PTR_ERR(cipher); | 186 | return PTR_ERR(cipher); |
187 | 187 | ||
188 | ctx->child = cipher; | 188 | ctx->child = cipher; |
189 | 189 | ||
190 | return 0; | 190 | return 0; |
191 | }; | 191 | }; |
192 | 192 | ||
193 | static void xcbc_exit_tfm(struct crypto_tfm *tfm) | 193 | static void xcbc_exit_tfm(struct crypto_tfm *tfm) |
194 | { | 194 | { |
195 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); | 195 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
196 | crypto_free_cipher(ctx->child); | 196 | crypto_free_cipher(ctx->child); |
197 | } | 197 | } |
198 | 198 | ||
199 | static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) | 199 | static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) |
200 | { | 200 | { |
201 | struct shash_instance *inst; | 201 | struct shash_instance *inst; |
202 | struct crypto_alg *alg; | 202 | struct crypto_alg *alg; |
203 | unsigned long alignmask; | 203 | unsigned long alignmask; |
204 | int err; | 204 | int err; |
205 | 205 | ||
206 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); | 206 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
207 | if (err) | 207 | if (err) |
208 | return err; | 208 | return err; |
209 | 209 | ||
210 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 210 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
211 | CRYPTO_ALG_TYPE_MASK); | 211 | CRYPTO_ALG_TYPE_MASK); |
212 | if (IS_ERR(alg)) | 212 | if (IS_ERR(alg)) |
213 | return PTR_ERR(alg); | 213 | return PTR_ERR(alg); |
214 | 214 | ||
215 | switch(alg->cra_blocksize) { | 215 | switch(alg->cra_blocksize) { |
216 | case 16: | 216 | case 16: |
217 | break; | 217 | break; |
218 | default: | 218 | default: |
219 | goto out_put_alg; | 219 | goto out_put_alg; |
220 | } | 220 | } |
221 | 221 | ||
222 | inst = shash_alloc_instance("xcbc", alg); | 222 | inst = shash_alloc_instance("xcbc", alg); |
223 | err = PTR_ERR(inst); | 223 | err = PTR_ERR(inst); |
224 | if (IS_ERR(inst)) | 224 | if (IS_ERR(inst)) |
225 | goto out_put_alg; | 225 | goto out_put_alg; |
226 | 226 | ||
227 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, | 227 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, |
228 | shash_crypto_instance(inst), | 228 | shash_crypto_instance(inst), |
229 | CRYPTO_ALG_TYPE_MASK); | 229 | CRYPTO_ALG_TYPE_MASK); |
230 | if (err) | 230 | if (err) |
231 | goto out_free_inst; | 231 | goto out_free_inst; |
232 | 232 | ||
233 | alignmask = alg->cra_alignmask | 3; | 233 | alignmask = alg->cra_alignmask | 3; |
234 | inst->alg.base.cra_alignmask = alignmask; | 234 | inst->alg.base.cra_alignmask = alignmask; |
235 | inst->alg.base.cra_priority = alg->cra_priority; | 235 | inst->alg.base.cra_priority = alg->cra_priority; |
236 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | 236 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
237 | 237 | ||
238 | inst->alg.digestsize = alg->cra_blocksize; | 238 | inst->alg.digestsize = alg->cra_blocksize; |
239 | inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), | 239 | inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), |
240 | crypto_tfm_ctx_alignment()) + | 240 | crypto_tfm_ctx_alignment()) + |
241 | (alignmask & | 241 | (alignmask & |
242 | ~(crypto_tfm_ctx_alignment() - 1)) + | 242 | ~(crypto_tfm_ctx_alignment() - 1)) + |
243 | alg->cra_blocksize * 2; | 243 | alg->cra_blocksize * 2; |
244 | 244 | ||
245 | inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), | 245 | inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), |
246 | alignmask + 1) + | 246 | alignmask + 1) + |
247 | alg->cra_blocksize * 2; | 247 | alg->cra_blocksize * 2; |
248 | inst->alg.base.cra_init = xcbc_init_tfm; | 248 | inst->alg.base.cra_init = xcbc_init_tfm; |
249 | inst->alg.base.cra_exit = xcbc_exit_tfm; | 249 | inst->alg.base.cra_exit = xcbc_exit_tfm; |
250 | 250 | ||
251 | inst->alg.init = crypto_xcbc_digest_init; | 251 | inst->alg.init = crypto_xcbc_digest_init; |
252 | inst->alg.update = crypto_xcbc_digest_update; | 252 | inst->alg.update = crypto_xcbc_digest_update; |
253 | inst->alg.final = crypto_xcbc_digest_final; | 253 | inst->alg.final = crypto_xcbc_digest_final; |
254 | inst->alg.setkey = crypto_xcbc_digest_setkey; | 254 | inst->alg.setkey = crypto_xcbc_digest_setkey; |
255 | 255 | ||
256 | err = shash_register_instance(tmpl, inst); | 256 | err = shash_register_instance(tmpl, inst); |
257 | if (err) { | 257 | if (err) { |
258 | out_free_inst: | 258 | out_free_inst: |
259 | shash_free_instance(shash_crypto_instance(inst)); | 259 | shash_free_instance(shash_crypto_instance(inst)); |
260 | } | 260 | } |
261 | 261 | ||
262 | out_put_alg: | 262 | out_put_alg: |
263 | crypto_mod_put(alg); | 263 | crypto_mod_put(alg); |
264 | return err; | 264 | return err; |
265 | } | 265 | } |
266 | 266 | ||
267 | static struct crypto_template crypto_xcbc_tmpl = { | 267 | static struct crypto_template crypto_xcbc_tmpl = { |
268 | .name = "xcbc", | 268 | .name = "xcbc", |
269 | .create = xcbc_create, | 269 | .create = xcbc_create, |
270 | .free = shash_free_instance, | 270 | .free = shash_free_instance, |
271 | .module = THIS_MODULE, | 271 | .module = THIS_MODULE, |
272 | }; | 272 | }; |
273 | 273 | ||
274 | static int __init crypto_xcbc_module_init(void) | 274 | static int __init crypto_xcbc_module_init(void) |
275 | { | 275 | { |
276 | return crypto_register_template(&crypto_xcbc_tmpl); | 276 | return crypto_register_template(&crypto_xcbc_tmpl); |
277 | } | 277 | } |
278 | 278 | ||
279 | static void __exit crypto_xcbc_module_exit(void) | 279 | static void __exit crypto_xcbc_module_exit(void) |
280 | { | 280 | { |
281 | crypto_unregister_template(&crypto_xcbc_tmpl); | 281 | crypto_unregister_template(&crypto_xcbc_tmpl); |
282 | } | 282 | } |
283 | 283 | ||
284 | module_init(crypto_xcbc_module_init); | 284 | module_init(crypto_xcbc_module_init); |
285 | module_exit(crypto_xcbc_module_exit); | 285 | module_exit(crypto_xcbc_module_exit); |
286 | 286 | ||
287 | MODULE_LICENSE("GPL"); | 287 | MODULE_LICENSE("GPL"); |
288 | MODULE_DESCRIPTION("XCBC keyed hash algorithm"); | 288 | MODULE_DESCRIPTION("XCBC keyed hash algorithm"); |
289 | MODULE_ALIAS_CRYPTO("xcbc"); | ||
289 | 290 |
crypto/xts.c
1 | /* XTS: as defined in IEEE1619/D16 | 1 | /* XTS: as defined in IEEE1619/D16 |
2 | * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf | 2 | * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf |
3 | * (sector sizes which are not a multiple of 16 bytes are, | 3 | * (sector sizes which are not a multiple of 16 bytes are, |
4 | * however currently unsupported) | 4 | * however currently unsupported) |
5 | * | 5 | * |
6 | * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org> | 6 | * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org> |
7 | * | 7 | * |
8 | * Based on ecb.c | 8 | * Based on ecb.c |
9 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 9 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the Free | 12 | * under the terms of the GNU General Public License as published by the Free |
13 | * Software Foundation; either version 2 of the License, or (at your option) | 13 | * Software Foundation; either version 2 of the License, or (at your option) |
14 | * any later version. | 14 | * any later version. |
15 | */ | 15 | */ |
16 | #include <crypto/algapi.h> | 16 | #include <crypto/algapi.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/scatterlist.h> | 21 | #include <linux/scatterlist.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | 23 | ||
24 | #include <crypto/xts.h> | 24 | #include <crypto/xts.h> |
25 | #include <crypto/b128ops.h> | 25 | #include <crypto/b128ops.h> |
26 | #include <crypto/gf128mul.h> | 26 | #include <crypto/gf128mul.h> |
27 | 27 | ||
28 | struct priv { | 28 | struct priv { |
29 | struct crypto_cipher *child; | 29 | struct crypto_cipher *child; |
30 | struct crypto_cipher *tweak; | 30 | struct crypto_cipher *tweak; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static int setkey(struct crypto_tfm *parent, const u8 *key, | 33 | static int setkey(struct crypto_tfm *parent, const u8 *key, |
34 | unsigned int keylen) | 34 | unsigned int keylen) |
35 | { | 35 | { |
36 | struct priv *ctx = crypto_tfm_ctx(parent); | 36 | struct priv *ctx = crypto_tfm_ctx(parent); |
37 | struct crypto_cipher *child = ctx->tweak; | 37 | struct crypto_cipher *child = ctx->tweak; |
38 | u32 *flags = &parent->crt_flags; | 38 | u32 *flags = &parent->crt_flags; |
39 | int err; | 39 | int err; |
40 | 40 | ||
41 | /* key consists of keys of equal size concatenated, therefore | 41 | /* key consists of keys of equal size concatenated, therefore |
42 | * the length must be even */ | 42 | * the length must be even */ |
43 | if (keylen % 2) { | 43 | if (keylen % 2) { |
44 | /* tell the user why there was an error */ | 44 | /* tell the user why there was an error */ |
45 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 45 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
46 | return -EINVAL; | 46 | return -EINVAL; |
47 | } | 47 | } |
48 | 48 | ||
49 | /* we need two cipher instances: one to compute the initial 'tweak' | 49 | /* we need two cipher instances: one to compute the initial 'tweak' |
50 | * by encrypting the IV (usually the 'plain' iv) and the other | 50 | * by encrypting the IV (usually the 'plain' iv) and the other |
51 | * one to encrypt and decrypt the data */ | 51 | * one to encrypt and decrypt the data */ |
52 | 52 | ||
53 | /* tweak cipher, uses Key2 i.e. the second half of *key */ | 53 | /* tweak cipher, uses Key2 i.e. the second half of *key */ |
54 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 54 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
55 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 55 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & |
56 | CRYPTO_TFM_REQ_MASK); | 56 | CRYPTO_TFM_REQ_MASK); |
57 | err = crypto_cipher_setkey(child, key + keylen/2, keylen/2); | 57 | err = crypto_cipher_setkey(child, key + keylen/2, keylen/2); |
58 | if (err) | 58 | if (err) |
59 | return err; | 59 | return err; |
60 | 60 | ||
61 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 61 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & |
62 | CRYPTO_TFM_RES_MASK); | 62 | CRYPTO_TFM_RES_MASK); |
63 | 63 | ||
64 | child = ctx->child; | 64 | child = ctx->child; |
65 | 65 | ||
66 | /* data cipher, uses Key1 i.e. the first half of *key */ | 66 | /* data cipher, uses Key1 i.e. the first half of *key */ |
67 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 67 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
68 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 68 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & |
69 | CRYPTO_TFM_REQ_MASK); | 69 | CRYPTO_TFM_REQ_MASK); |
70 | err = crypto_cipher_setkey(child, key, keylen/2); | 70 | err = crypto_cipher_setkey(child, key, keylen/2); |
71 | if (err) | 71 | if (err) |
72 | return err; | 72 | return err; |
73 | 73 | ||
74 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 74 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & |
75 | CRYPTO_TFM_RES_MASK); | 75 | CRYPTO_TFM_RES_MASK); |
76 | 76 | ||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
79 | 79 | ||
/* Per-request state threaded through xts_round() for each block. */
struct sinfo {
	be128 *t;		/* current tweak value T (lives in the walk IV) */
	struct crypto_tfm *tfm;	/* data cipher transform (ctx->child) */
	/* single-block encrypt or decrypt op of the data cipher */
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
};
85 | 85 | ||
86 | static inline void xts_round(struct sinfo *s, void *dst, const void *src) | 86 | static inline void xts_round(struct sinfo *s, void *dst, const void *src) |
87 | { | 87 | { |
88 | be128_xor(dst, s->t, src); /* PP <- T xor P */ | 88 | be128_xor(dst, s->t, src); /* PP <- T xor P */ |
89 | s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ | 89 | s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ |
90 | be128_xor(dst, dst, s->t); /* C <- T xor CC */ | 90 | be128_xor(dst, dst, s->t); /* C <- T xor CC */ |
91 | } | 91 | } |
92 | 92 | ||
/*
 * Core XTS loop shared by encrypt() and decrypt().
 *
 * @d:   blkcipher descriptor for the walk helpers
 * @w:   walk state, already initialised by the caller
 * @ctx: instance context holding the tweak and data ciphers
 * @tw:  single-block op of the tweak cipher, used once to turn the IV
 *       into the first tweak value T
 * @fn:  single-block encrypt or decrypt op of the data cipher
 *
 * T is kept in the walk's IV buffer so it survives across walk chunks;
 * after the first block it is advanced by multiplying with x in
 * GF(2^128) (little-endian convention, gf128mul_x_ble).
 *
 * Returns 0 on success or the error from the blkcipher walk helpers.
 */
static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*tw)(struct crypto_tfm *, u8 *, const u8 *),
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = XTS_BLOCK_SIZE;
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!w->nbytes)
		return err;

	/* T lives in the IV buffer so it persists across walk steps. */
	s.t = (be128 *)w->iv;
	avail = w->nbytes;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);

	/* The very first block uses T as-is, so jump past the
	 * gf128mul_x_ble() step on the first iteration. */
	goto first;

	for (;;) {
		do {
			gf128mul_x_ble(s.t, s.t);	/* T <- T * x */

first:
			xts_round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!w->nbytes)
			break;

		avail = w->nbytes;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}
146 | 146 | ||
147 | static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 147 | static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
148 | struct scatterlist *src, unsigned int nbytes) | 148 | struct scatterlist *src, unsigned int nbytes) |
149 | { | 149 | { |
150 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 150 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); |
151 | struct blkcipher_walk w; | 151 | struct blkcipher_walk w; |
152 | 152 | ||
153 | blkcipher_walk_init(&w, dst, src, nbytes); | 153 | blkcipher_walk_init(&w, dst, src, nbytes); |
154 | return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, | 154 | return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, |
155 | crypto_cipher_alg(ctx->child)->cia_encrypt); | 155 | crypto_cipher_alg(ctx->child)->cia_encrypt); |
156 | } | 156 | } |
157 | 157 | ||
158 | static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 158 | static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
159 | struct scatterlist *src, unsigned int nbytes) | 159 | struct scatterlist *src, unsigned int nbytes) |
160 | { | 160 | { |
161 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 161 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); |
162 | struct blkcipher_walk w; | 162 | struct blkcipher_walk w; |
163 | 163 | ||
164 | blkcipher_walk_init(&w, dst, src, nbytes); | 164 | blkcipher_walk_init(&w, dst, src, nbytes); |
165 | return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, | 165 | return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, |
166 | crypto_cipher_alg(ctx->child)->cia_decrypt); | 166 | crypto_cipher_alg(ctx->child)->cia_decrypt); |
167 | } | 167 | } |
168 | 168 | ||
/*
 * Bulk XTS helper for callers that can process many blocks per call
 * (e.g. accelerated cipher glue code).  Up to @req->tbuflen / 16 tweak
 * values are precomputed into @req->tbuf, then @req->crypt_fn is invoked
 * once for the whole run of blocks.
 *
 * @desc:   blkcipher descriptor for the walk helpers
 * @sdst:   destination scatterlist
 * @ssrc:   source scatterlist
 * @nbytes: total number of bytes to process
 * @req:    caller-supplied tweak buffer, tweak and bulk crypt callbacks
 *
 * Returns 0 on success or the error from the blkcipher walk helpers.
 */
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct xts_crypt_req *req)
{
	const unsigned int bsize = XTS_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	/* The tweak buffer must hold at least one block. */
	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

	/* The first tweak comes straight from the IV; skip the
	 * gf128mul_x_ble() step on the very first block. */
	i = 0;
	goto first;

	for (;;) {
		do {
			/* Derive the next tweaks from the previous one. */
			for (i = 0; i < nblocks; i++) {
				gf128mul_x_ble(&t_buf[i], t);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key2,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		/* Carry the last tweak over to the next walk chunk. */
		*(be128 *)walk.iv = *t;

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);
240 | 240 | ||
241 | static int init_tfm(struct crypto_tfm *tfm) | 241 | static int init_tfm(struct crypto_tfm *tfm) |
242 | { | 242 | { |
243 | struct crypto_cipher *cipher; | 243 | struct crypto_cipher *cipher; |
244 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 244 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
245 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 245 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
246 | struct priv *ctx = crypto_tfm_ctx(tfm); | 246 | struct priv *ctx = crypto_tfm_ctx(tfm); |
247 | u32 *flags = &tfm->crt_flags; | 247 | u32 *flags = &tfm->crt_flags; |
248 | 248 | ||
249 | cipher = crypto_spawn_cipher(spawn); | 249 | cipher = crypto_spawn_cipher(spawn); |
250 | if (IS_ERR(cipher)) | 250 | if (IS_ERR(cipher)) |
251 | return PTR_ERR(cipher); | 251 | return PTR_ERR(cipher); |
252 | 252 | ||
253 | if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { | 253 | if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { |
254 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | 254 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; |
255 | crypto_free_cipher(cipher); | 255 | crypto_free_cipher(cipher); |
256 | return -EINVAL; | 256 | return -EINVAL; |
257 | } | 257 | } |
258 | 258 | ||
259 | ctx->child = cipher; | 259 | ctx->child = cipher; |
260 | 260 | ||
261 | cipher = crypto_spawn_cipher(spawn); | 261 | cipher = crypto_spawn_cipher(spawn); |
262 | if (IS_ERR(cipher)) { | 262 | if (IS_ERR(cipher)) { |
263 | crypto_free_cipher(ctx->child); | 263 | crypto_free_cipher(ctx->child); |
264 | return PTR_ERR(cipher); | 264 | return PTR_ERR(cipher); |
265 | } | 265 | } |
266 | 266 | ||
267 | /* this check isn't really needed, leave it here just in case */ | 267 | /* this check isn't really needed, leave it here just in case */ |
268 | if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { | 268 | if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { |
269 | crypto_free_cipher(cipher); | 269 | crypto_free_cipher(cipher); |
270 | crypto_free_cipher(ctx->child); | 270 | crypto_free_cipher(ctx->child); |
271 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | 271 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; |
272 | return -EINVAL; | 272 | return -EINVAL; |
273 | } | 273 | } |
274 | 274 | ||
275 | ctx->tweak = cipher; | 275 | ctx->tweak = cipher; |
276 | 276 | ||
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | static void exit_tfm(struct crypto_tfm *tfm) | 280 | static void exit_tfm(struct crypto_tfm *tfm) |
281 | { | 281 | { |
282 | struct priv *ctx = crypto_tfm_ctx(tfm); | 282 | struct priv *ctx = crypto_tfm_ctx(tfm); |
283 | crypto_free_cipher(ctx->child); | 283 | crypto_free_cipher(ctx->child); |
284 | crypto_free_cipher(ctx->tweak); | 284 | crypto_free_cipher(ctx->tweak); |
285 | } | 285 | } |
286 | 286 | ||
287 | static struct crypto_instance *alloc(struct rtattr **tb) | 287 | static struct crypto_instance *alloc(struct rtattr **tb) |
288 | { | 288 | { |
289 | struct crypto_instance *inst; | 289 | struct crypto_instance *inst; |
290 | struct crypto_alg *alg; | 290 | struct crypto_alg *alg; |
291 | int err; | 291 | int err; |
292 | 292 | ||
293 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 293 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); |
294 | if (err) | 294 | if (err) |
295 | return ERR_PTR(err); | 295 | return ERR_PTR(err); |
296 | 296 | ||
297 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 297 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
298 | CRYPTO_ALG_TYPE_MASK); | 298 | CRYPTO_ALG_TYPE_MASK); |
299 | if (IS_ERR(alg)) | 299 | if (IS_ERR(alg)) |
300 | return ERR_CAST(alg); | 300 | return ERR_CAST(alg); |
301 | 301 | ||
302 | inst = crypto_alloc_instance("xts", alg); | 302 | inst = crypto_alloc_instance("xts", alg); |
303 | if (IS_ERR(inst)) | 303 | if (IS_ERR(inst)) |
304 | goto out_put_alg; | 304 | goto out_put_alg; |
305 | 305 | ||
306 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 306 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; |
307 | inst->alg.cra_priority = alg->cra_priority; | 307 | inst->alg.cra_priority = alg->cra_priority; |
308 | inst->alg.cra_blocksize = alg->cra_blocksize; | 308 | inst->alg.cra_blocksize = alg->cra_blocksize; |
309 | 309 | ||
310 | if (alg->cra_alignmask < 7) | 310 | if (alg->cra_alignmask < 7) |
311 | inst->alg.cra_alignmask = 7; | 311 | inst->alg.cra_alignmask = 7; |
312 | else | 312 | else |
313 | inst->alg.cra_alignmask = alg->cra_alignmask; | 313 | inst->alg.cra_alignmask = alg->cra_alignmask; |
314 | 314 | ||
315 | inst->alg.cra_type = &crypto_blkcipher_type; | 315 | inst->alg.cra_type = &crypto_blkcipher_type; |
316 | 316 | ||
317 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 317 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; |
318 | inst->alg.cra_blkcipher.min_keysize = | 318 | inst->alg.cra_blkcipher.min_keysize = |
319 | 2 * alg->cra_cipher.cia_min_keysize; | 319 | 2 * alg->cra_cipher.cia_min_keysize; |
320 | inst->alg.cra_blkcipher.max_keysize = | 320 | inst->alg.cra_blkcipher.max_keysize = |
321 | 2 * alg->cra_cipher.cia_max_keysize; | 321 | 2 * alg->cra_cipher.cia_max_keysize; |
322 | 322 | ||
323 | inst->alg.cra_ctxsize = sizeof(struct priv); | 323 | inst->alg.cra_ctxsize = sizeof(struct priv); |
324 | 324 | ||
325 | inst->alg.cra_init = init_tfm; | 325 | inst->alg.cra_init = init_tfm; |
326 | inst->alg.cra_exit = exit_tfm; | 326 | inst->alg.cra_exit = exit_tfm; |
327 | 327 | ||
328 | inst->alg.cra_blkcipher.setkey = setkey; | 328 | inst->alg.cra_blkcipher.setkey = setkey; |
329 | inst->alg.cra_blkcipher.encrypt = encrypt; | 329 | inst->alg.cra_blkcipher.encrypt = encrypt; |
330 | inst->alg.cra_blkcipher.decrypt = decrypt; | 330 | inst->alg.cra_blkcipher.decrypt = decrypt; |
331 | 331 | ||
332 | out_put_alg: | 332 | out_put_alg: |
333 | crypto_mod_put(alg); | 333 | crypto_mod_put(alg); |
334 | return inst; | 334 | return inst; |
335 | } | 335 | } |
336 | 336 | ||
/* Template .free: drop the cipher spawn and release the instance. */
static void free(struct crypto_instance *inst)
{
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);

	crypto_drop_spawn(spawn);
	kfree(inst);
}
342 | 342 | ||
/* Template registration record: instantiates "xts(<cipher>)" on demand. */
static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.alloc = alloc,
	.free = free,
	.module = THIS_MODULE,
};
349 | 349 | ||
/* Module load: register the "xts" template with the crypto core. */
static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}
354 | 354 | ||
/* Module unload: unregister the "xts" template. */
static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}
359 | 359 | ||
module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
/* "crypto-" prefixed alias so module autoloading resolves "crypto-xts". */
MODULE_ALIAS_CRYPTO("xts");
365 | 366 |