Commit 5f565d12d2b003ffe232adbf1ca8084b1aba3895
1 parent
159a82241b
Exists in
v3.2_SMARCT335xPSP_04.06.00.11
and in
2 other branches
Add 0002-AM335x-OCF-Driver-for-Linux-3.patch that wasn't part of the official PSP 04.06.00.11 release
Showing 23 changed files with 6988 additions and 0 deletions Side-by-side Diff
- crypto/Kconfig
- crypto/Makefile
- crypto/ocf/Config.in
- crypto/ocf/Kconfig
- crypto/ocf/Makefile
- crypto/ocf/criov.c
- crypto/ocf/crypto.c
- crypto/ocf/cryptodev.c
- crypto/ocf/cryptodev.h
- crypto/ocf/cryptosoft.c
- crypto/ocf/ocf-bench.c
- crypto/ocf/ocf-compat.h
- crypto/ocf/ocfnull/Makefile
- crypto/ocf/ocfnull/ocfnull.c
- crypto/ocf/random.c
- crypto/ocf/rndtest.c
- crypto/ocf/rndtest.h
- crypto/ocf/uio.h
- drivers/char/random.c
- fs/fcntl.c
- include/linux/miscdevice.h
- include/linux/random.h
- kernel/pid.c
crypto/Kconfig
crypto/Makefile
crypto/ocf/Config.in
1 | +############################################################################# | |
2 | + | |
3 | +mainmenu_option next_comment | |
4 | +comment 'OCF Configuration' | |
5 | +tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF | |
6 | +dep_mbool ' enable fips RNG checks (fips check on RNG data before use)' \ | |
7 | + CONFIG_OCF_FIPS $CONFIG_OCF_OCF | |
8 | +dep_mbool ' enable harvesting entropy for /dev/random' \ | |
9 | + CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF | |
10 | +dep_tristate ' cryptodev (user space support)' \ | |
11 | + CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF | |
12 | +dep_tristate ' cryptosoft (software crypto engine)' \ | |
13 | + CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF | |
14 | +dep_tristate ' ocfnull (does no crypto)' \ | |
15 | + CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF | |
16 | +dep_tristate ' ocf-bench (HW crypto in-kernel benchmark)' \ | |
17 | + CONFIG_OCF_BENCH $CONFIG_OCF_OCF | |
18 | +endmenu | |
19 | + | |
20 | +############################################################################# |
crypto/ocf/Kconfig
1 | +menu "OCF Configuration" | |
2 | + | |
3 | +config OCF_OCF | |
4 | + tristate "OCF (Open Cryptographic Framework)" | |
5 | + help | |
6 | + A linux port of the OpenBSD/FreeBSD crypto framework. | |
7 | + | |
8 | +config OCF_RANDOMHARVEST | |
9 | + bool "crypto random --- harvest entropy for /dev/random" | |
10 | + depends on OCF_OCF | |
11 | + help | |
12 | + Includes code to harvest random numbers from devices that support it. | |
13 | + | |
14 | +config OCF_FIPS | |
15 | + bool "enable fips RNG checks" | |
16 | + depends on OCF_OCF && OCF_RANDOMHARVEST | |
17 | + help | |
18 | + Run all RNG provided data through a fips check before | |
19 | + adding it to /dev/random's entropy pool. | |
20 | + | |
21 | +config OCF_CRYPTODEV | |
22 | + tristate "cryptodev (user space support)" | |
23 | + depends on OCF_OCF | |
24 | + help | |
25 | + The user space API to access crypto hardware. | |
26 | + | |
27 | +config OCF_CRYPTOSOFT | |
28 | + tristate "cryptosoft (software crypto engine)" | |
29 | + depends on OCF_OCF | |
30 | + help | |
31 | + A software driver for the OCF framework that uses | |
32 | + the kernel CryptoAPI. | |
33 | + | |
34 | +config OCF_OCFNULL | |
35 | + tristate "ocfnull (fake crypto engine)" | |
36 | + depends on OCF_OCF | |
37 | + help | |
38 | + OCF driver for measuring ipsec overheads (does no crypto) | |
39 | + | |
40 | +config OCF_BENCH | |
41 | + tristate "ocf-bench (HW crypto in-kernel benchmark)" | |
42 | + depends on OCF_OCF | |
43 | + help | |
44 | + A very simple encryption test for the in-kernel interface | |
45 | + of OCF. Also includes code to benchmark the IXP Access library | |
46 | + for comparison. | |
47 | + | |
48 | +endmenu |
crypto/ocf/Makefile
1 | +# for SGlinux builds | |
2 | +-include $(ROOTDIR)/modules/.config | |
3 | + | |
4 | +OCF_OBJS = crypto.o criov.o | |
5 | + | |
6 | +ifdef CONFIG_OCF_RANDOMHARVEST | |
7 | + OCF_OBJS += random.o | |
8 | +endif | |
9 | + | |
10 | +ifdef CONFIG_OCF_FIPS | |
11 | + OCF_OBJS += rndtest.o | |
12 | +endif | |
13 | + | |
14 | +# Add in autoconf.h to get #defines for CONFIG_xxx | |
15 | +AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h | |
16 | +ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H))) | |
17 | + EXTRA_CFLAGS += -include $(AUTOCONF_H) | |
18 | + export EXTRA_CFLAGS | |
19 | +endif | |
20 | + | |
21 | +ifndef obj | |
22 | + obj ?= . | |
23 | + _obj = subdir | |
24 | + mod-subdirs := safe hifn ixp4xx talitos ocfnull | |
25 | + export-objs += crypto.o criov.o random.o | |
26 | + list-multi += ocf.o | |
27 | + _slash := | |
28 | +else | |
29 | + _obj = obj | |
30 | + _slash := / | |
31 | +endif | |
32 | + | |
33 | +EXTRA_CFLAGS += -I$(obj)/. | |
34 | + | |
35 | +obj-$(CONFIG_OCF_OCF) += ocf.o | |
36 | +obj-$(CONFIG_OCF_CRYPTODEV) += cryptodev.o | |
37 | +obj-$(CONFIG_OCF_CRYPTOSOFT) += cryptosoft.o | |
38 | +obj-$(CONFIG_OCF_BENCH) += ocf-bench.o | |
39 | + | |
40 | +$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash) | |
41 | + | |
42 | +ocf-objs := $(OCF_OBJS) | |
43 | + | |
44 | +dummy: | |
45 | + @echo "Please consult the README for how to build OCF." | |
46 | + @echo "If you can't wait then the following should do it:" | |
47 | + @echo "" | |
48 | + @echo " make ocf_modules" | |
49 | + @echo " sudo make ocf_install" | |
50 | + @echo "" | |
51 | + @exit 1 | |
52 | + | |
53 | +$(list-multi) dummy1: $(ocf-objs) | |
54 | + $(LD) -r -o $@ $(ocf-objs) | |
55 | + | |
56 | +.PHONY: | |
57 | +clean: | |
58 | + rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c | |
59 | + rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags | |
60 | + rm -f */modules.order */modules.builtin modules.order modules.builtin | |
61 | + | |
62 | +ifdef TOPDIR | |
63 | +-include $(TOPDIR)/Rules.make | |
64 | +endif | |
65 | + | |
66 | +# | |
67 | +# targets to build easily on the current machine | |
68 | +# | |
69 | + | |
70 | +ocf_make: | |
71 | + make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m | |
72 | + make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_CRYPTOSOFT=m | |
73 | + -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_BENCH=m | |
74 | + -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_OCFNULL=m | |
75 | + -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_HIFN=m | |
76 | + | |
77 | +ocf_modules: | |
78 | + $(MAKE) ocf_make OCF_TARGET=modules | |
79 | + | |
80 | +ocf_install: | |
81 | + $(MAKE) ocf_make OCF_TARGET="modules modules_install" | |
82 | + depmod | |
83 | + mkdir -p /usr/include/crypto | |
84 | + cp cryptodev.h /usr/include/crypto/. | |
85 | + | |
86 | +# | |
87 | +# generate full kernel patches for 2.4 and 2.6 kernels to make patching | |
88 | +# your kernel easier | |
89 | +# | |
90 | + | |
91 | +.PHONY: patch | |
92 | +patch: | |
93 | + patchbase=.; \ | |
94 | + [ -d $$patchbase/patches ] || patchbase=..; \ | |
95 | + patch=ocf-linux-base.patch; \ | |
96 | + patch24=ocf-linux-24.patch; \ | |
97 | + patch26=ocf-linux-26.patch; \ | |
98 | + patch3=ocf-linux-3.patch; \ | |
99 | + ( \ | |
100 | + find . -name Makefile; \ | |
101 | + find . -name Config.in; \ | |
102 | + find . -name Kconfig; \ | |
103 | + find . -name README; \ | |
104 | + find . -name '*.[ch]' | grep -v '.mod.c'; \ | |
105 | + ) | while read t; do \ | |
106 | + diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \ | |
107 | + done > $$patch; \ | |
108 | + cat $$patchbase/patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \ | |
109 | + cat $$patchbase/patches/linux-2.6.38-ocf.patch $$patch > $$patch26; \ | |
110 | + cat $$patchbase/patches/linux-3.2.1-ocf.patch $$patch > $$patch3; \ | |
111 | + | |
112 | + | |
113 | +# | |
114 | +# this target probably does nothing for anyone but me - davidm | |
115 | +# | |
116 | + | |
117 | +.PHONY: release | |
118 | +release: | |
119 | + REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \ | |
120 | + CURDIR=`pwd`; \ | |
121 | + rm -rf /tmp/ocf-linux-$$REL*; \ | |
122 | + mkdir -p $$RELDIR/ocf; \ | |
123 | + mkdir -p $$RELDIR/patches; \ | |
124 | + mkdir -p $$RELDIR/crypto-tools; \ | |
125 | + cp README* $$RELDIR/.; \ | |
126 | + cp patches/[!C]* $$RELDIR/patches/.; \ | |
127 | + cp tools/[!C]* $$RELDIR/crypto-tools/.; \ | |
128 | + cp -r [!C]* Config.in $$RELDIR/ocf/.; \ | |
129 | + rm -rf $$RELDIR/ocf/patches $$RELDIR/ocf/tools; \ | |
130 | + rm -f $$RELDIR/ocf/README*; \ | |
131 | + cp $$CURDIR/../../user/crypto-tools/[!C]* $$RELDIR/crypto-tools/.; \ | |
132 | + make -C $$RELDIR/crypto-tools clean; \ | |
133 | + make -C $$RELDIR/ocf clean; \ | |
134 | + find $$RELDIR/ocf -name CVS | xargs rm -rf; \ | |
135 | + cd $$RELDIR/..; \ | |
136 | + tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \ | |
137 | + gzip -9 ocf-linux-$$REL.tar |
crypto/ocf/criov.c
1 | +/* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */ | |
2 | + | |
3 | +/* | |
4 | + * Linux port done by David McCullough <david_mccullough@mcafee.com> | |
5 | + * Copyright (C) 2006-2010 David McCullough | |
6 | + * Copyright (C) 2004-2005 Intel Corporation. | |
7 | + * The license and original author are listed below. | |
8 | + * | |
9 | + * Copyright (c) 1999 Theo de Raadt | |
10 | + * | |
11 | + * Redistribution and use in source and binary forms, with or without | |
12 | + * modification, are permitted provided that the following conditions | |
13 | + * are met: | |
14 | + * | |
15 | + * 1. Redistributions of source code must retain the above copyright | |
16 | + * notice, this list of conditions and the following disclaimer. | |
17 | + * 2. Redistributions in binary form must reproduce the above copyright | |
18 | + * notice, this list of conditions and the following disclaimer in the | |
19 | + * documentation and/or other materials provided with the distribution. | |
20 | + * 3. The name of the author may not be used to endorse or promote products | |
21 | + * derived from this software without specific prior written permission. | |
22 | + * | |
23 | + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
24 | + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
25 | + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
26 | + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
27 | + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
28 | + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
29 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
30 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
31 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
32 | + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
33 | + * | |
34 | +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $"); | |
35 | + */ | |
36 | + | |
37 | +#include <linux/version.h> | |
38 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
39 | +#include <linux/config.h> | |
40 | +#endif | |
41 | +#include <linux/module.h> | |
42 | +#include <linux/init.h> | |
43 | +#include <linux/slab.h> | |
44 | +#include <linux/uio.h> | |
45 | +#include <linux/skbuff.h> | |
46 | +#include <linux/kernel.h> | |
47 | +#include <linux/mm.h> | |
48 | +#include <asm/io.h> | |
49 | + | |
50 | +#include <uio.h> | |
51 | +#include <cryptodev.h> | |
52 | + | |
53 | +/* | |
54 | + * This macro is only for avoiding code duplication, as we need to skip | |
55 | + * given number of bytes in the same way in three functions below. | |
56 | + */ | |
57 | +#define CUIO_SKIP() do { \ | |
58 | + KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \ | |
59 | + KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \ | |
60 | + while (off > 0) { \ | |
61 | + KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \ | |
62 | + if (off < iov->iov_len) \ | |
63 | + break; \ | |
64 | + off -= iov->iov_len; \ | |
65 | + iol--; \ | |
66 | + iov++; \ | |
67 | + } \ | |
68 | +} while (0) | |
69 | + | |
70 | +void | |
71 | +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp) | |
72 | +{ | |
73 | + struct iovec *iov = uio->uio_iov; | |
74 | + int iol = uio->uio_iovcnt; | |
75 | + unsigned count; | |
76 | + | |
77 | + CUIO_SKIP(); | |
78 | + while (len > 0) { | |
79 | + KASSERT(iol >= 0, ("%s: empty", __func__)); | |
80 | + count = min((int)(iov->iov_len - off), len); | |
81 | + memcpy(cp, ((caddr_t)iov->iov_base) + off, count); | |
82 | + len -= count; | |
83 | + cp += count; | |
84 | + off = 0; | |
85 | + iol--; | |
86 | + iov++; | |
87 | + } | |
88 | +} | |
89 | + | |
90 | +void | |
91 | +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp) | |
92 | +{ | |
93 | + struct iovec *iov = uio->uio_iov; | |
94 | + int iol = uio->uio_iovcnt; | |
95 | + unsigned count; | |
96 | + | |
97 | + CUIO_SKIP(); | |
98 | + while (len > 0) { | |
99 | + KASSERT(iol >= 0, ("%s: empty", __func__)); | |
100 | + count = min((int)(iov->iov_len - off), len); | |
101 | + memcpy(((caddr_t)iov->iov_base) + off, cp, count); | |
102 | + len -= count; | |
103 | + cp += count; | |
104 | + off = 0; | |
105 | + iol--; | |
106 | + iov++; | |
107 | + } | |
108 | +} | |
109 | + | |
110 | +/* | |
111 | + * Return a pointer to iov/offset of location in iovec list. | |
112 | + */ | |
113 | +struct iovec * | |
114 | +cuio_getptr(struct uio *uio, int loc, int *off) | |
115 | +{ | |
116 | + struct iovec *iov = uio->uio_iov; | |
117 | + int iol = uio->uio_iovcnt; | |
118 | + | |
119 | + while (loc >= 0) { | |
120 | + /* Normal end of search */ | |
121 | + if (loc < iov->iov_len) { | |
122 | + *off = loc; | |
123 | + return (iov); | |
124 | + } | |
125 | + | |
126 | + loc -= iov->iov_len; | |
127 | + if (iol == 0) { | |
128 | + if (loc == 0) { | |
129 | + /* Point at the end of valid data */ | |
130 | + *off = iov->iov_len; | |
131 | + return (iov); | |
132 | + } else | |
133 | + return (NULL); | |
134 | + } else { | |
135 | + iov++, iol--; | |
136 | + } | |
137 | + } | |
138 | + | |
139 | + return (NULL); | |
140 | +} | |
141 | + | |
142 | +EXPORT_SYMBOL(cuio_copyback); | |
143 | +EXPORT_SYMBOL(cuio_copydata); | |
144 | +EXPORT_SYMBOL(cuio_getptr); | |
145 | + | |
146 | +static void | |
147 | +skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len) | |
148 | +{ | |
149 | + int i; | |
150 | + if (offset < skb_headlen(skb)) { | |
151 | + memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len)); | |
152 | + len -= skb_headlen(skb); | |
153 | + cp += skb_headlen(skb); | |
154 | + } | |
155 | + offset -= skb_headlen(skb); | |
156 | + for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) { | |
157 | + if (offset < skb_shinfo(skb)->frags[i].size) { | |
158 | + memcpy(page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) + | |
159 | + skb_shinfo(skb)->frags[i].page_offset, | |
160 | + cp, min_t(int, skb_shinfo(skb)->frags[i].size, len)); | |
161 | + len -= skb_shinfo(skb)->frags[i].size; | |
162 | + cp += skb_shinfo(skb)->frags[i].size; | |
163 | + } | |
164 | + offset -= skb_shinfo(skb)->frags[i].size; | |
165 | + } | |
166 | +} | |
167 | + | |
168 | +void | |
169 | +crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in) | |
170 | +{ | |
171 | + | |
172 | + if ((flags & CRYPTO_F_SKBUF) != 0) | |
173 | + skb_copy_bits_back((struct sk_buff *)buf, off, in, size); | |
174 | + else if ((flags & CRYPTO_F_IOV) != 0) | |
175 | + cuio_copyback((struct uio *)buf, off, size, in); | |
176 | + else | |
177 | + bcopy(in, buf + off, size); | |
178 | +} | |
179 | + | |
180 | +void | |
181 | +crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out) | |
182 | +{ | |
183 | + | |
184 | + if ((flags & CRYPTO_F_SKBUF) != 0) | |
185 | + skb_copy_bits((struct sk_buff *)buf, off, out, size); | |
186 | + else if ((flags & CRYPTO_F_IOV) != 0) | |
187 | + cuio_copydata((struct uio *)buf, off, size, out); | |
188 | + else | |
189 | + bcopy(buf + off, out, size); | |
190 | +} | |
191 | + | |
192 | +int | |
193 | +crypto_apply(int flags, caddr_t buf, int off, int len, | |
194 | + int (*f)(void *, void *, u_int), void *arg) | |
195 | +{ | |
196 | +#if 0 | |
197 | + int error; | |
198 | + | |
199 | + if ((flags & CRYPTO_F_SKBUF) != 0) | |
200 | + error = XXXXXX((struct mbuf *)buf, off, len, f, arg); | |
201 | + else if ((flags & CRYPTO_F_IOV) != 0) | |
202 | + error = cuio_apply((struct uio *)buf, off, len, f, arg); | |
203 | + else | |
204 | + error = (*f)(arg, buf + off, len); | |
205 | + return (error); | |
206 | +#else | |
207 | + KASSERT(0, ("crypto_apply not implemented!\n")); | |
208 | +#endif | |
209 | + return 0; | |
210 | +} | |
211 | + | |
212 | +EXPORT_SYMBOL(crypto_copyback); | |
213 | +EXPORT_SYMBOL(crypto_copydata); | |
214 | +EXPORT_SYMBOL(crypto_apply); |
crypto/ocf/crypto.c
Changes suppressed. Click to show
1 | +/*- | |
2 | + * Linux port done by David McCullough <david_mccullough@mcafee.com> | |
3 | + * Copyright (C) 2006-2010 David McCullough | |
4 | + * Copyright (C) 2004-2005 Intel Corporation. | |
5 | + * The license and original author are listed below. | |
6 | + * | |
7 | + * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. | |
8 | + * | |
9 | + * Redistribution and use in source and binary forms, with or without | |
10 | + * modification, are permitted provided that the following conditions | |
11 | + * are met: | |
12 | + * 1. Redistributions of source code must retain the above copyright | |
13 | + * notice, this list of conditions and the following disclaimer. | |
14 | + * 2. Redistributions in binary form must reproduce the above copyright | |
15 | + * notice, this list of conditions and the following disclaimer in the | |
16 | + * documentation and/or other materials provided with the distribution. | |
17 | + * | |
18 | + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
19 | + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
20 | + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
21 | + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
22 | + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
23 | + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
24 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
25 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
26 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
27 | + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
28 | + */ | |
29 | + | |
30 | +#if 0 | |
31 | +#include <sys/cdefs.h> | |
32 | +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $"); | |
33 | +#endif | |
34 | + | |
35 | +/* | |
36 | + * Cryptographic Subsystem. | |
37 | + * | |
38 | + * This code is derived from the Openbsd Cryptographic Framework (OCF) | |
39 | + * that has the copyright shown below. Very little of the original | |
40 | + * code remains. | |
41 | + */ | |
42 | +/*- | |
43 | + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) | |
44 | + * | |
45 | + * This code was written by Angelos D. Keromytis in Athens, Greece, in | |
46 | + * February 2000. Network Security Technologies Inc. (NSTI) kindly | |
47 | + * supported the development of this code. | |
48 | + * | |
49 | + * Copyright (c) 2000, 2001 Angelos D. Keromytis | |
50 | + * | |
51 | + * Permission to use, copy, and modify this software with or without fee | |
52 | + * is hereby granted, provided that this entire notice is included in | |
53 | + * all source code copies of any software which is or includes a copy or | |
54 | + * modification of this software. | |
55 | + * | |
56 | + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR | |
57 | + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY | |
58 | + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE | |
59 | + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR | |
60 | + * PURPOSE. | |
61 | + * | |
62 | +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $"); | |
63 | + */ | |
64 | + | |
65 | + | |
66 | +#include <linux/version.h> | |
67 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
68 | +#include <linux/config.h> | |
69 | +#endif | |
70 | +#include <linux/module.h> | |
71 | +#include <linux/init.h> | |
72 | +#include <linux/list.h> | |
73 | +#include <linux/slab.h> | |
74 | +#include <linux/wait.h> | |
75 | +#include <linux/sched.h> | |
76 | +#include <linux/spinlock.h> | |
77 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,4) | |
78 | +#include <linux/kthread.h> | |
79 | +#endif | |
80 | +#include <cryptodev.h> | |
81 | + | |
82 | +/* | |
83 | + * keep track of whether or not we have been initialised, a big | |
84 | + * issue if we are linked into the kernel and a driver gets started before | |
85 | + * us | |
86 | + */ | |
87 | +static int crypto_initted = 0; | |
88 | + | |
89 | +/* | |
90 | + * Crypto drivers register themselves by allocating a slot in the | |
91 | + * crypto_drivers table with crypto_get_driverid() and then registering | |
92 | + * each algorithm they support with crypto_register() and crypto_kregister(). | |
93 | + */ | |
94 | + | |
95 | +/* | |
96 | + * lock on driver table | |
97 | + * we track its state as spin_is_locked does not do anything on non-SMP boxes | |
98 | + */ | |
99 | +static spinlock_t crypto_drivers_lock; | |
100 | +static int crypto_drivers_locked; /* for non-SMP boxes */ | |
101 | + | |
102 | +#define CRYPTO_DRIVER_LOCK() \ | |
103 | + ({ \ | |
104 | + spin_lock_irqsave(&crypto_drivers_lock, d_flags); \ | |
105 | + crypto_drivers_locked = 1; \ | |
106 | + dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \ | |
107 | + }) | |
108 | +#define CRYPTO_DRIVER_UNLOCK() \ | |
109 | + ({ \ | |
110 | + dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \ | |
111 | + crypto_drivers_locked = 0; \ | |
112 | + spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \ | |
113 | + }) | |
114 | +#define CRYPTO_DRIVER_ASSERT() \ | |
115 | + ({ \ | |
116 | + if (!crypto_drivers_locked) { \ | |
117 | + dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \ | |
118 | + } \ | |
119 | + }) | |
120 | + | |
121 | +/* | |
122 | + * Crypto device/driver capabilities structure. | |
123 | + * | |
124 | + * Synchronization: | |
125 | + * (d) - protected by CRYPTO_DRIVER_LOCK() | |
126 | + * (q) - protected by CRYPTO_Q_LOCK() | |
127 | + * Not tagged fields are read-only. | |
128 | + */ | |
129 | +struct cryptocap { | |
130 | + device_t cc_dev; /* (d) device/driver */ | |
131 | + u_int32_t cc_sessions; /* (d) # of sessions */ | |
132 | + u_int32_t cc_koperations; /* (d) # of asym operations */ | |
133 | + /* | |
134 | + * Largest possible operator length (in bits) for each type of | |
135 | + * encryption algorithm. XXX not used | |
136 | + */ | |
137 | + u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1]; | |
138 | + u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1]; | |
139 | + u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; | |
140 | + | |
141 | + int cc_flags; /* (d) flags */ | |
142 | +#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ | |
143 | + int cc_qblocked; /* (q) symmetric q blocked */ | |
144 | + int cc_kqblocked; /* (q) asymmetric q blocked */ | |
145 | + | |
146 | + int cc_unqblocked; /* (q) symmetric q blocked */ | |
147 | + int cc_unkqblocked; /* (q) asymmetric q blocked */ | |
148 | +}; | |
149 | +static struct cryptocap *crypto_drivers = NULL; | |
150 | +static int crypto_drivers_num = 0; | |
151 | + | |
152 | +/* | |
153 | + * There are two queues for crypto requests; one for symmetric (e.g. | |
154 | + * cipher) operations and one for asymmetric (e.g. MOD) operations. | |
155 | + * A single mutex is used to lock access to both queues. We could | |
156 | + * have one per-queue but having one simplifies handling of block/unblock | |
157 | + * operations. | |
158 | + */ | |
159 | +static LIST_HEAD(crp_q); /* crypto request queue */ | |
160 | +static LIST_HEAD(crp_kq); /* asym request queue */ | |
161 | + | |
162 | +static spinlock_t crypto_q_lock; | |
163 | + | |
164 | +int crypto_all_qblocked = 0; /* protect with Q_LOCK */ | |
165 | +module_param(crypto_all_qblocked, int, 0444); | |
166 | +MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked"); | |
167 | + | |
168 | +int crypto_all_kqblocked = 0; /* protect with Q_LOCK */ | |
169 | +module_param(crypto_all_kqblocked, int, 0444); | |
170 | +MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked"); | |
171 | + | |
172 | +#define CRYPTO_Q_LOCK() \ | |
173 | + ({ \ | |
174 | + spin_lock_irqsave(&crypto_q_lock, q_flags); \ | |
175 | + dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \ | |
176 | + }) | |
177 | +#define CRYPTO_Q_UNLOCK() \ | |
178 | + ({ \ | |
179 | + dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \ | |
180 | + spin_unlock_irqrestore(&crypto_q_lock, q_flags); \ | |
181 | + }) | |
182 | + | |
183 | +/* | |
184 | + * There are two queues for processing completed crypto requests; one | |
185 | + * for the symmetric and one for the asymmetric ops. We only need one | |
186 | + * but have two to avoid type futzing (cryptop vs. cryptkop). A single | |
187 | + * mutex is used to lock access to both queues. Note that this lock | |
188 | + * must be separate from the lock on request queues to ensure driver | |
189 | + * callbacks don't generate lock order reversals. | |
190 | + */ | |
191 | +static LIST_HEAD(crp_ret_q); /* callback queues */ | |
192 | +static LIST_HEAD(crp_ret_kq); | |
193 | + | |
194 | +static spinlock_t crypto_ret_q_lock; | |
195 | +#define CRYPTO_RETQ_LOCK() \ | |
196 | + ({ \ | |
197 | + spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \ | |
198 | + dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \ | |
199 | + }) | |
200 | +#define CRYPTO_RETQ_UNLOCK() \ | |
201 | + ({ \ | |
202 | + dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \ | |
203 | + spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \ | |
204 | + }) | |
205 | +#define CRYPTO_RETQ_EMPTY() (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq)) | |
206 | + | |
207 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) | |
208 | +static kmem_cache_t *cryptop_zone; | |
209 | +static kmem_cache_t *cryptodesc_zone; | |
210 | +#else | |
211 | +static struct kmem_cache *cryptop_zone; | |
212 | +static struct kmem_cache *cryptodesc_zone; | |
213 | +#endif | |
214 | + | |
215 | +#define debug crypto_debug | |
216 | +int crypto_debug = 0; | |
217 | +module_param(crypto_debug, int, 0644); | |
218 | +MODULE_PARM_DESC(crypto_debug, "Enable debug"); | |
219 | +EXPORT_SYMBOL(crypto_debug); | |
220 | + | |
221 | +/* | |
222 | + * Maximum number of outstanding crypto requests before we start | |
223 | + * failing requests. We need this to prevent DOS when too many | |
224 | + * requests are arriving for us to keep up. Otherwise we will | |
225 | + * run the system out of memory. Since crypto is slow, we are | |
226 | + * usually the bottleneck that needs to say, enough is enough. | |
227 | + * | |
228 | + * We cannot print errors when this condition occurs, we are already too | |
229 | + * slow, printing anything will just kill us | |
230 | + */ | |
231 | + | |
232 | +static int crypto_q_cnt = 0; | |
233 | +module_param(crypto_q_cnt, int, 0444); | |
234 | +MODULE_PARM_DESC(crypto_q_cnt, | |
235 | + "Current number of outstanding crypto requests"); | |
236 | + | |
237 | +static int crypto_q_max = 1000; | |
238 | +module_param(crypto_q_max, int, 0644); | |
239 | +MODULE_PARM_DESC(crypto_q_max, | |
240 | + "Maximum number of outstanding crypto requests"); | |
241 | + | |
242 | +#define bootverbose crypto_verbose | |
243 | +static int crypto_verbose = 0; | |
244 | +module_param(crypto_verbose, int, 0644); | |
245 | +MODULE_PARM_DESC(crypto_verbose, | |
246 | + "Enable verbose crypto startup"); | |
247 | + | |
248 | +int crypto_usercrypto = 1; /* userland may do crypto reqs */ | |
249 | +module_param(crypto_usercrypto, int, 0644); | |
250 | +MODULE_PARM_DESC(crypto_usercrypto, | |
251 | + "Enable/disable user-mode access to crypto support"); | |
252 | + | |
253 | +int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */ | |
254 | +module_param(crypto_userasymcrypto, int, 0644); | |
255 | +MODULE_PARM_DESC(crypto_userasymcrypto, | |
256 | + "Enable/disable user-mode access to asymmetric crypto support"); | |
257 | + | |
258 | +int crypto_devallowsoft = 0; /* only use hardware crypto */ | |
259 | +module_param(crypto_devallowsoft, int, 0644); | |
260 | +MODULE_PARM_DESC(crypto_devallowsoft, | |
261 | + "Enable/disable use of software crypto support"); | |
262 | + | |
263 | +/* | |
264 | + * This parameter controls the maximum number of crypto operations to | |
265 | + * do consecutively in the crypto kernel thread before scheduling to allow | |
266 | + * other processes to run. Without it, it is possible to get into a | |
267 | + * situation where the crypto thread never allows any other processes to run. | |
268 | + * Default to 1000 which should be less than one second. | |
269 | + */ | |
270 | +static int crypto_max_loopcount = 1000; | |
271 | +module_param(crypto_max_loopcount, int, 0644); | |
272 | +MODULE_PARM_DESC(crypto_max_loopcount, | |
273 | + "Maximum number of crypto ops to do before yielding to other processes"); | |
274 | + | |
275 | +#ifndef CONFIG_NR_CPUS | |
276 | +#define CONFIG_NR_CPUS 1 | |
277 | +#endif | |
278 | + | |
279 | +static struct task_struct *cryptoproc[CONFIG_NR_CPUS]; | |
280 | +static struct task_struct *cryptoretproc[CONFIG_NR_CPUS]; | |
281 | +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait); | |
282 | +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait); | |
283 | + | |
284 | +static int crypto_proc(void *arg); | |
285 | +static int crypto_ret_proc(void *arg); | |
286 | +static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); | |
287 | +static int crypto_kinvoke(struct cryptkop *krp, int flags); | |
288 | +static void crypto_exit(void); | |
289 | +static int crypto_init(void); | |
290 | + | |
291 | +static struct cryptostats cryptostats; | |
292 | + | |
293 | +static struct cryptocap * | |
294 | +crypto_checkdriver(u_int32_t hid) | |
295 | +{ | |
296 | + if (crypto_drivers == NULL) | |
297 | + return NULL; | |
298 | + return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]); | |
299 | +} | |
300 | + | |
301 | +/* | |
302 | + * Compare a driver's list of supported algorithms against another | |
303 | + * list; return non-zero if all algorithms are supported. | |
304 | + */ | |
305 | +static int | |
306 | +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri) | |
307 | +{ | |
308 | + const struct cryptoini *cr; | |
309 | + | |
310 | + /* See if all the algorithms are supported. */ | |
311 | + for (cr = cri; cr; cr = cr->cri_next) | |
312 | + if (cap->cc_alg[cr->cri_alg] == 0) | |
313 | + return 0; | |
314 | + return 1; | |
315 | +} | |
316 | + | |
317 | + | |
318 | +/* | |
319 | + * Select a driver for a new session that supports the specified | |
320 | + * algorithms and, optionally, is constrained according to the flags. | |
321 | + * The algorithm we use here is pretty stupid; just use the | |
322 | + * first driver that supports all the algorithms we need. If there | |
323 | + * are multiple drivers we choose the driver with the fewest active | |
324 | + * sessions. We prefer hardware-backed drivers to software ones. | |
325 | + * | |
326 | + * XXX We need more smarts here (in real life too, but that's | |
327 | + * XXX another story altogether). | |
328 | + */ | |
329 | +static struct cryptocap * | |
330 | +crypto_select_driver(const struct cryptoini *cri, int flags) | |
331 | +{ | |
332 | + struct cryptocap *cap, *best; | |
333 | + int match, hid; | |
334 | + | |
335 | + CRYPTO_DRIVER_ASSERT(); | |
336 | + | |
337 | + /* | |
338 | + * Look first for hardware crypto devices if permitted. | |
339 | + */ | |
340 | + if (flags & CRYPTOCAP_F_HARDWARE) | |
341 | + match = CRYPTOCAP_F_HARDWARE; | |
342 | + else | |
343 | + match = CRYPTOCAP_F_SOFTWARE; | |
344 | + best = NULL; | |
345 | +again: | |
346 | + for (hid = 0; hid < crypto_drivers_num; hid++) { | |
347 | + cap = &crypto_drivers[hid]; | |
348 | + /* | |
349 | + * If it's not initialized, is in the process of | |
350 | + * going away, or is not appropriate (hardware | |
351 | + * or software based on match), then skip. | |
352 | + */ | |
353 | + if (cap->cc_dev == NULL || | |
354 | + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) || | |
355 | + (cap->cc_flags & match) == 0) | |
356 | + continue; | |
357 | + | |
358 | + /* verify all the algorithms are supported. */ | |
359 | + if (driver_suitable(cap, cri)) { | |
360 | + if (best == NULL || | |
361 | + cap->cc_sessions < best->cc_sessions) | |
362 | + best = cap; | |
363 | + } | |
364 | + } | |
365 | + if (best != NULL) | |
366 | + return best; | |
367 | + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { | |
368 | + /* sort of an Algol 68-style for loop */ | |
369 | + match = CRYPTOCAP_F_SOFTWARE; | |
370 | + goto again; | |
371 | + } | |
372 | + return best; | |
373 | +} | |
374 | + | |
/*
 * Create a new session.  The crid argument either names a specific
 * driver id or carries CRYPTOCAP_F_HARDWARE/SOFTWARE constraint flags
 * used to pick one.  Whatever driver is selected must be capable of
 * every algorithm in the cri chain.  On success *sid encodes the
 * driver's capability flags, its id (hid) and the driver-local
 * session id (lid).  Returns 0 or an errno value.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
{
	struct cryptocap *cap;
	u_int32_t hid, lid;
	int err;
	unsigned long d_flags;	/* used by CRYPTO_DRIVER_LOCK/UNLOCK */

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && !driver_suitable(cap, cri))
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(cri, crid);
		/*
		 * if NULL then can't do everything in one session.
		 * XXX Fix this. We need to inject a "virtual" session
		 * XXX layer right about here.
		 */
	}
	if (cap != NULL) {
		/* Call the driver initialization routine. */
		hid = cap - crypto_drivers;
		lid = hid;		/* Pass the driver ID. */
		/*
		 * Bump the session count before dropping the lock so
		 * the slot cannot be reclaimed while the driver's
		 * newsession routine runs unlocked (it may sleep).
		 */
		cap->cc_sessions++;
		CRYPTO_DRIVER_UNLOCK();
		err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
		CRYPTO_DRIVER_LOCK();
		if (err == 0) {
			/*
			 * sid layout: capability flags + hid in the
			 * high 32 bits, driver-local session id in
			 * the low 32 bits.
			 */
			(*sid) = (cap->cc_flags & 0xff000000)
			       | (hid & 0x00ffffff);
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
		} else
			cap->cc_sessions--;	/* roll back on failure */
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();
	return err;
}
428 | + | |
429 | +static void | |
430 | +crypto_remove(struct cryptocap *cap) | |
431 | +{ | |
432 | + CRYPTO_DRIVER_ASSERT(); | |
433 | + if (cap->cc_sessions == 0 && cap->cc_koperations == 0) | |
434 | + bzero(cap, sizeof(*cap)); | |
435 | +} | |
436 | + | |
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).  Returns 0, or EINVAL/ENOENT when the driver table or the
 * session's driver id is invalid.
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	u_int32_t hid;
	int err = 0;
	unsigned long d_flags;	/* used by CRYPTO_DRIVER_LOCK/UNLOCK */

	dprintk("%s()\n", __FUNCTION__);
	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Recover the driver id encoded in the session id. */
	hid = CRYPTO_SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
		err = ENOENT;
		goto done;
	}
	cap = &crypto_drivers[hid];

	if (cap->cc_dev) {
		CRYPTO_DRIVER_UNLOCK();
		/* Call the driver cleanup routine, if available, unlocked. */
		err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
		CRYPTO_DRIVER_LOCK();
	}

	if (cap->cc_sessions)
		cap->cc_sessions--;

	/* Driver already unregistered: reclaim the slot once idle. */
	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		crypto_remove(cap);

done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}
484 | + | |
485 | +/* | |
486 | + * Return an unused driver id. Used by drivers prior to registering | |
487 | + * support for the algorithms they handle. | |
488 | + */ | |
489 | +int32_t | |
490 | +crypto_get_driverid(device_t dev, int flags) | |
491 | +{ | |
492 | + struct cryptocap *newdrv; | |
493 | + int i; | |
494 | + unsigned long d_flags; | |
495 | + | |
496 | + if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { | |
497 | + printf("%s: no flags specified when registering driver\n", | |
498 | + device_get_nameunit(dev)); | |
499 | + return -1; | |
500 | + } | |
501 | + | |
502 | + CRYPTO_DRIVER_LOCK(); | |
503 | + | |
504 | + for (i = 0; i < crypto_drivers_num; i++) { | |
505 | + if (crypto_drivers[i].cc_dev == NULL && | |
506 | + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) { | |
507 | + break; | |
508 | + } | |
509 | + } | |
510 | + | |
511 | + /* Out of entries, allocate some more. */ | |
512 | + if (i == crypto_drivers_num) { | |
513 | + /* Be careful about wrap-around. */ | |
514 | + if (2 * crypto_drivers_num <= crypto_drivers_num) { | |
515 | + CRYPTO_DRIVER_UNLOCK(); | |
516 | + printk("crypto: driver count wraparound!\n"); | |
517 | + return -1; | |
518 | + } | |
519 | + | |
520 | + newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap), | |
521 | + GFP_KERNEL); | |
522 | + if (newdrv == NULL) { | |
523 | + CRYPTO_DRIVER_UNLOCK(); | |
524 | + printk("crypto: no space to expand driver table!\n"); | |
525 | + return -1; | |
526 | + } | |
527 | + | |
528 | + memcpy(newdrv, crypto_drivers, | |
529 | + crypto_drivers_num * sizeof(struct cryptocap)); | |
530 | + memset(&newdrv[crypto_drivers_num], 0, | |
531 | + crypto_drivers_num * sizeof(struct cryptocap)); | |
532 | + | |
533 | + crypto_drivers_num *= 2; | |
534 | + | |
535 | + kfree(crypto_drivers); | |
536 | + crypto_drivers = newdrv; | |
537 | + } | |
538 | + | |
539 | + /* NB: state is zero'd on free */ | |
540 | + crypto_drivers[i].cc_sessions = 1; /* Mark */ | |
541 | + crypto_drivers[i].cc_dev = dev; | |
542 | + crypto_drivers[i].cc_flags = flags; | |
543 | + if (bootverbose) | |
544 | + printf("crypto: assign %s driver id %u, flags %u\n", | |
545 | + device_get_nameunit(dev), i, flags); | |
546 | + | |
547 | + CRYPTO_DRIVER_UNLOCK(); | |
548 | + | |
549 | + return i; | |
550 | +} | |
551 | + | |
552 | +/* | |
553 | + * Lookup a driver by name. We match against the full device | |
554 | + * name and unit, and against just the name. The latter gives | |
555 | + * us a simple widlcarding by device name. On success return the | |
556 | + * driver/hardware identifier; otherwise return -1. | |
557 | + */ | |
558 | +int | |
559 | +crypto_find_driver(const char *match) | |
560 | +{ | |
561 | + int i, len = strlen(match); | |
562 | + unsigned long d_flags; | |
563 | + | |
564 | + CRYPTO_DRIVER_LOCK(); | |
565 | + for (i = 0; i < crypto_drivers_num; i++) { | |
566 | + device_t dev = crypto_drivers[i].cc_dev; | |
567 | + if (dev == NULL || | |
568 | + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP)) | |
569 | + continue; | |
570 | + if (strncmp(match, device_get_nameunit(dev), len) == 0 || | |
571 | + strncmp(match, device_get_name(dev), len) == 0) | |
572 | + break; | |
573 | + } | |
574 | + CRYPTO_DRIVER_UNLOCK(); | |
575 | + return i < crypto_drivers_num ? i : -1; | |
576 | +} | |
577 | + | |
578 | +/* | |
579 | + * Return the device_t for the specified driver or NULL | |
580 | + * if the driver identifier is invalid. | |
581 | + */ | |
582 | +device_t | |
583 | +crypto_find_device_byhid(int hid) | |
584 | +{ | |
585 | + struct cryptocap *cap = crypto_checkdriver(hid); | |
586 | + return cap != NULL ? cap->cc_dev : NULL; | |
587 | +} | |
588 | + | |
589 | +/* | |
590 | + * Return the device/driver capabilities. | |
591 | + */ | |
592 | +int | |
593 | +crypto_getcaps(int hid) | |
594 | +{ | |
595 | + struct cryptocap *cap = crypto_checkdriver(hid); | |
596 | + return cap != NULL ? cap->cc_flags : 0; | |
597 | +} | |
598 | + | |
/*
 * Register support for a key-related (asymmetric) algorithm.  This
 * routine is called once for each algorithm supported by a driver.
 * Returns 0, or EINVAL for an unknown driver or out-of-range kalg.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
{
	struct cryptocap *cap;
	int err;
	unsigned long d_flags;	/* used by CRYPTO_DRIVER_LOCK/UNLOCK */

	dprintk("%s()\n", __FUNCTION__);
	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: "CRK_ALGORITM_MIN" (sic) is the spelling cryptodev.h uses. */
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: %s registers key alg %u flags %u\n"
				, device_get_nameunit(cap->cc_dev)
				, kalg
				, flags
			);
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}
636 | + | |
/*
 * Register support for a non-key-related (symmetric/hash) algorithm.
 * This routine is called once for each such algorithm supported by a
 * driver.  maxoplen records the driver's maximum operation length
 * for the algorithm.  Returns 0 or EINVAL.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags)
{
	struct cryptocap *cap;
	int err;
	unsigned long d_flags;	/* used by CRYPTO_DRIVER_LOCK/UNLOCK */

	dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
			driverid, alg, maxoplen, flags);

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
				, device_get_nameunit(cap->cc_dev)
				, alg
				, flags
				, maxoplen
			);
		/*
		 * Clear the placeholder session count that
		 * crypto_get_driverid() set to reserve the slot.
		 */
		cap->cc_sessions = 0;		/* Unmark */
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}
681 | + | |
682 | +static void | |
683 | +driver_finis(struct cryptocap *cap) | |
684 | +{ | |
685 | + u_int32_t ses, kops; | |
686 | + | |
687 | + CRYPTO_DRIVER_ASSERT(); | |
688 | + | |
689 | + ses = cap->cc_sessions; | |
690 | + kops = cap->cc_koperations; | |
691 | + bzero(cap, sizeof(*cap)); | |
692 | + if (ses != 0 || kops != 0) { | |
693 | + /* | |
694 | + * If there are pending sessions, | |
695 | + * just mark as invalid. | |
696 | + */ | |
697 | + cap->cc_flags |= CRYPTOCAP_F_CLEANUP; | |
698 | + cap->cc_sessions = ses; | |
699 | + cap->cc_koperations = kops; | |
700 | + } | |
701 | +} | |
702 | + | |
/*
 * Unregister a crypto driver's support for a single algorithm.  If
 * there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.  When the last algorithm is removed the whole slot is
 * torn down via driver_finis().
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	struct cryptocap *cap;
	int i, err;
	unsigned long d_flags;	/* used by CRYPTO_DRIVER_LOCK/UNLOCK */

	dprintk("%s()\n", __FUNCTION__);
	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1)
			driver_finis(cap);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();
	return err;
}
739 | + | |
740 | +/* | |
741 | + * Unregister all algorithms associated with a crypto driver. | |
742 | + * If there are pending sessions using it, leave enough information | |
743 | + * around so that subsequent calls using those sessions will | |
744 | + * correctly detect the driver has been unregistered and reroute | |
745 | + * requests. | |
746 | + */ | |
747 | +int | |
748 | +crypto_unregister_all(u_int32_t driverid) | |
749 | +{ | |
750 | + struct cryptocap *cap; | |
751 | + int err; | |
752 | + unsigned long d_flags; | |
753 | + | |
754 | + dprintk("%s()\n", __FUNCTION__); | |
755 | + CRYPTO_DRIVER_LOCK(); | |
756 | + cap = crypto_checkdriver(driverid); | |
757 | + if (cap != NULL) { | |
758 | + driver_finis(cap); | |
759 | + err = 0; | |
760 | + } else | |
761 | + err = EINVAL; | |
762 | + CRYPTO_DRIVER_UNLOCK(); | |
763 | + | |
764 | + return err; | |
765 | +} | |
766 | + | |
/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's (CRYPTO_SYMQ) and/or
 * cryptokop's (CRYPTO_ASYMQ).  Wakes the dispatch thread so any
 * queued requests are retried.  Returns 0 or EINVAL.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;
	unsigned long q_flags;	/* used by CRYPTO_Q_LOCK/UNLOCK */

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ) {
			cap->cc_qblocked = 0;
			cap->cc_unqblocked = 0;
			crypto_all_qblocked = 0;
		}
		if (what & CRYPTO_ASYMQ) {
			cap->cc_kqblocked = 0;
			cap->cc_unkqblocked = 0;
			crypto_all_kqblocked = 0;
		}
		wake_up_interruptible(&cryptoproc_wait);
		err = 0;
	} else
		err = EINVAL;
	/*
	 * NOTE(review): driver state is modified here under the queue
	 * lock; the original author questioned whether the driver lock
	 * should be used instead — confirm against the lock hierarchy.
	 */
	CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock

	return err;
}
799 | + | |
/*
 * Add a crypto request to a queue, to be processed by the kernel
 * thread.  Unless CRYPTO_F_BATCH is set, the request is first tried
 * directly against its session's driver; a busy driver (ERESTART) or
 * a batched request is queued for the crypto_proc thread instead.
 * Returns 0 on acceptance or ENOMEM when the queue is full.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	struct cryptocap *cap;
	/* -1 = "not attempted"; distinguishes the batch/blocked path below */
	int result = -1;
	unsigned long q_flags;	/* used by CRYPTO_Q_LOCK/UNLOCK */

	dprintk("%s()\n", __FUNCTION__);

	cryptostats.cs_ops++;

	CRYPTO_Q_LOCK();
	if (crypto_q_cnt >= crypto_q_max) {
		/* queue full: drop the request */
		cryptostats.cs_drops++;
		CRYPTO_Q_UNLOCK();
		return ENOMEM;
	}
	crypto_q_cnt++;

	/* make sure we are starting a fresh run on this crp. */
	crp->crp_flags &= ~CRYPTO_F_DONE;
	crp->crp_etype = 0;

	/*
	 * Caller marked the request to be processed immediately; dispatch
	 * it directly to the driver unless the driver is currently blocked.
	 */
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		int hid = CRYPTO_SESID2HID(crp->crp_sid);
		cap = crypto_checkdriver(hid);
		/* Driver cannot disappear when there is an active session. */
		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
		if (!cap->cc_qblocked) {
			crypto_all_qblocked = 0;
			/*
			 * cc_unqblocked flags this in-flight invoke so a
			 * concurrent crypto_unblock() can cancel the
			 * "mark blocked on ERESTART" step below.
			 */
			crypto_drivers[hid].cc_unqblocked = 1;
			CRYPTO_Q_UNLOCK();
			result = crypto_invoke(cap, crp, 0);
			CRYPTO_Q_LOCK();
			if (result == ERESTART)
				if (crypto_drivers[hid].cc_unqblocked)
					crypto_drivers[hid].cc_qblocked = 1;
			crypto_drivers[hid].cc_unqblocked = 0;
		}
	}
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the request back in the queue.  It would
		 * best to put the request back where we got
		 * it but that's hard so for now we put it
		 * at the front.  This should be ok; putting
		 * it at the end does not work.
		 */
		list_add(&crp->crp_next, &crp_q);
		cryptostats.cs_blocks++;
		result = 0;
	} else if (result == -1) {
		/* Batched, or driver blocked: queue at the tail. */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		result = 0;
	}
	wake_up_interruptible(&cryptoproc_wait);
	CRYPTO_Q_UNLOCK();
	return result;
}
868 | + | |
869 | +/* | |
870 | + * Add an asymetric crypto request to a queue, | |
871 | + * to be processed by the kernel thread. | |
872 | + */ | |
873 | +int | |
874 | +crypto_kdispatch(struct cryptkop *krp) | |
875 | +{ | |
876 | + int error; | |
877 | + unsigned long q_flags; | |
878 | + | |
879 | + cryptostats.cs_kops++; | |
880 | + | |
881 | + error = crypto_kinvoke(krp, krp->krp_crid); | |
882 | + if (error == ERESTART) { | |
883 | + CRYPTO_Q_LOCK(); | |
884 | + TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); | |
885 | + wake_up_interruptible(&cryptoproc_wait); | |
886 | + CRYPTO_Q_UNLOCK(); | |
887 | + error = 0; | |
888 | + } | |
889 | + return error; | |
890 | +} | |
891 | + | |
892 | +/* | |
893 | + * Verify a driver is suitable for the specified operation. | |
894 | + */ | |
895 | +static __inline int | |
896 | +kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) | |
897 | +{ | |
898 | + return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; | |
899 | +} | |
900 | + | |
901 | +/* | |
902 | + * Select a driver for an asym operation. The driver must | |
903 | + * support the necessary algorithm. The caller can constrain | |
904 | + * which device is selected with the flags parameter. The | |
905 | + * algorithm we use here is pretty stupid; just use the first | |
906 | + * driver that supports the algorithms we need. If there are | |
907 | + * multiple suitable drivers we choose the driver with the | |
908 | + * fewest active operations. We prefer hardware-backed | |
909 | + * drivers to software ones when either may be used. | |
910 | + */ | |
911 | +static struct cryptocap * | |
912 | +crypto_select_kdriver(const struct cryptkop *krp, int flags) | |
913 | +{ | |
914 | + struct cryptocap *cap, *best, *blocked; | |
915 | + int match, hid; | |
916 | + | |
917 | + CRYPTO_DRIVER_ASSERT(); | |
918 | + | |
919 | + /* | |
920 | + * Look first for hardware crypto devices if permitted. | |
921 | + */ | |
922 | + if (flags & CRYPTOCAP_F_HARDWARE) | |
923 | + match = CRYPTOCAP_F_HARDWARE; | |
924 | + else | |
925 | + match = CRYPTOCAP_F_SOFTWARE; | |
926 | + best = NULL; | |
927 | + blocked = NULL; | |
928 | +again: | |
929 | + for (hid = 0; hid < crypto_drivers_num; hid++) { | |
930 | + cap = &crypto_drivers[hid]; | |
931 | + /* | |
932 | + * If it's not initialized, is in the process of | |
933 | + * going away, or is not appropriate (hardware | |
934 | + * or software based on match), then skip. | |
935 | + */ | |
936 | + if (cap->cc_dev == NULL || | |
937 | + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) || | |
938 | + (cap->cc_flags & match) == 0) | |
939 | + continue; | |
940 | + | |
941 | + /* verify all the algorithms are supported. */ | |
942 | + if (kdriver_suitable(cap, krp)) { | |
943 | + if (best == NULL || | |
944 | + cap->cc_koperations < best->cc_koperations) | |
945 | + best = cap; | |
946 | + } | |
947 | + } | |
948 | + if (best != NULL) | |
949 | + return best; | |
950 | + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { | |
951 | + /* sort of an Algol 68-style for loop */ | |
952 | + match = CRYPTOCAP_F_SOFTWARE; | |
953 | + goto again; | |
954 | + } | |
955 | + return best; | |
956 | +} | |
957 | + | |
/*
 * Dispatch an asymmetric crypto request.  crid either names a driver
 * id directly or carries CRYPTOCAP_F_HARDWARE/SOFTWARE constraint
 * flags used to pick one.  ERESTART from the driver's kprocess
 * routine is returned to the caller for requeueing; every other
 * error is delivered via krp->krp_status and crypto_kdone(), with 0
 * returned.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int crid)
{
	struct cryptocap *cap = NULL;
	int error;
	unsigned long d_flags;	/* used by CRYPTO_DRIVER_LOCK/UNLOCK */

	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
	KASSERT(krp->krp_callback != NULL,
		("%s: krp->crp_callback == NULL", __func__));

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		cap = crypto_checkdriver(crid);
		if (cap != NULL) {
			/*
			 * Driver present, it must support the necessary
			 * algorithm and, if s/w drivers are excluded,
			 * it must be registered as hardware-backed.
			 */
			if (!kdriver_suitable(cap, krp) ||
			    (!crypto_devallowsoft &&
			     (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
				cap = NULL;
		}
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
			crid &= ~CRYPTOCAP_F_SOFTWARE;
		cap = crypto_select_kdriver(krp, crid);
	}
	if (cap != NULL && !cap->cc_kqblocked) {
		krp->krp_hid = cap - crypto_drivers;
		/*
		 * Count the op before dropping the lock so the slot
		 * survives a concurrent unregister while the driver
		 * runs unlocked.
		 */
		cap->cc_koperations++;
		CRYPTO_DRIVER_UNLOCK();
		error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
		CRYPTO_DRIVER_LOCK();
		if (error == ERESTART) {
			/* driver busy: undo the count, let caller requeue */
			cap->cc_koperations--;
			CRYPTO_DRIVER_UNLOCK();
			return (error);
		}
		/* return the actual device used */
		krp->krp_crid = krp->krp_hid;
	} else {
		/*
		 * NB: cap is !NULL if device is blocked; in
		 * that case return ERESTART so the operation
		 * is resubmitted if possible.
		 */
		error = (cap == NULL) ? ENODEV : ERESTART;
	}
	CRYPTO_DRIVER_UNLOCK();

	if (error) {
		/* Complete the op with the error as its status. */
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
1023 | + | |
1024 | + | |
/*
 * Dispatch a crypto request to the appropriate crypto device.  When
 * the driver has unregistered (CRYPTOCAP_F_CLEANUP) the session is
 * migrated to another driver and the op completes with EAGAIN so the
 * caller resubmits it; otherwise the request is handed straight to
 * the driver's process routine.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{
	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
		("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));

	dprintk("%s()\n", __FUNCTION__);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 * session?
		 */
		crypto_freesession(crp->crp_sid);

		/* Rebuild a cryptoini chain from the descriptor chain. */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		/* XXX propagate flags from initial session? */
		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_sid = nid;

		/* EAGAIN tells the caller to resubmit on the new session. */
		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
	}
}
1073 | + | |
/*
 * Release a set of crypto descriptors: each cryptodesc on the chain
 * and then the cryptop itself are returned to their slab caches.
 * A NULL crp is tolerated.  Under DIAGNOSTIC, first assert the
 * request is not still linked on the dispatch or return queue.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;

#ifdef DIAGNOSTIC
	{
		struct cryptop *crp2;
		unsigned long q_flags;

		CRYPTO_Q_LOCK();
		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
			KASSERT(crp2 != crp,
				("Freeing cryptop from the crypto queue (%p).",
				crp));
		}
		CRYPTO_Q_UNLOCK();
		CRYPTO_RETQ_LOCK();
		TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
			KASSERT(crp2 != crp,
				("Freeing cryptop from the return queue (%p).",
				crp));
		}
		CRYPTO_RETQ_UNLOCK();
	}
#endif

	/* Free the descriptor chain, then the request itself. */
	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		kmem_cache_free(cryptodesc_zone, crd);
	}
	kmem_cache_free(cryptop_zone, crp);
}
1113 | + | |
1114 | +/* | |
1115 | + * Acquire a set of crypto descriptors. | |
1116 | + */ | |
1117 | +struct cryptop * | |
1118 | +crypto_getreq(int num) | |
1119 | +{ | |
1120 | + struct cryptodesc *crd; | |
1121 | + struct cryptop *crp; | |
1122 | + | |
1123 | + crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC); | |
1124 | + if (crp != NULL) { | |
1125 | + memset(crp, 0, sizeof(*crp)); | |
1126 | + INIT_LIST_HEAD(&crp->crp_next); | |
1127 | + init_waitqueue_head(&crp->crp_waitq); | |
1128 | + while (num--) { | |
1129 | + crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC); | |
1130 | + if (crd == NULL) { | |
1131 | + crypto_freereq(crp); | |
1132 | + return NULL; | |
1133 | + } | |
1134 | + memset(crd, 0, sizeof(*crd)); | |
1135 | + crd->crd_next = crp->crp_desc; | |
1136 | + crp->crp_desc = crd; | |
1137 | + } | |
1138 | + } | |
1139 | + return crp; | |
1140 | +} | |
1141 | + | |
/*
 * Invoke the completion callback on behalf of the driver.  Marks the
 * op CRYPTO_F_DONE and either runs the callback inline (CBIMM, or
 * CBIFSYNC on a synchronous driver) or queues it for the return
 * thread.
 */
void
crypto_done(struct cryptop *crp)
{
	unsigned long q_flags;	/* used by CRYPTO_Q_LOCK/UNLOCK */

	dprintk("%s()\n", __FUNCTION__);
	if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
		crp->crp_flags |= CRYPTO_F_DONE;
		/* Drop the op from the dispatch-queue accounting. */
		CRYPTO_Q_LOCK();
		crypto_q_cnt--;
		CRYPTO_Q_UNLOCK();
	} else
		printk("crypto: crypto_done op already done, flags 0x%x",
				crp->crp_flags);
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		crp->crp_callback(crp);
	} else {
		unsigned long r_flags;	/* used by CRYPTO_RETQ_LOCK/UNLOCK */
		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETQ_LOCK();
		wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		CRYPTO_RETQ_UNLOCK();
	}
}
1188 | + | |
/*
 * Invoke the completion callback for a key operation on behalf of
 * the driver.  Also drops the per-driver key-op count and, when the
 * driver is pending cleanup, reclaims its slot once idle.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	struct cryptocap *cap;
	unsigned long d_flags;	/* used by CRYPTO_DRIVER_LOCK/UNLOCK */

	if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
		printk("crypto: crypto_kdone op already done, flags 0x%x",
				krp->krp_flags);
	krp->krp_flags |= CRYPTO_KF_DONE;
	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	CRYPTO_DRIVER_LOCK();
	/* XXX: What if driver is loaded in the meantime? */
	if (krp->krp_hid < crypto_drivers_num) {
		cap = &crypto_drivers[krp->krp_hid];
		cap->cc_koperations--;
		KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
		/* Unregistered driver: reclaim the slot once idle. */
		if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_remove(cap);
	}
	CRYPTO_DRIVER_UNLOCK();

	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * This is used to avoid doing extraneous context switches
	 */
	if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		krp->krp_callback(krp);
	} else {
		unsigned long r_flags;	/* used by CRYPTO_RETQ_LOCK/UNLOCK */
		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETQ_LOCK();
		wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
		TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
		CRYPTO_RETQ_UNLOCK();
	}
}
1238 | + | |
/*
 * Report available key-operation features as a bitmask: bit kalg is
 * set when some usable driver supports key algorithm kalg.  Software
 * drivers are skipped unless crypto_devallowsoft permits them.
 * Always returns 0, storing the mask through featp.
 */
int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;
	unsigned long d_flags;	/* used by CRYPTO_DRIVER_LOCK/UNLOCK */

	CRYPTO_DRIVER_LOCK();
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		const struct cryptocap *cap = &crypto_drivers[hid];

		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft) {
			continue;
		}
		/*
		 * NOTE(review): the bound is "< CRK_ALGORITHM_MAX",
		 * which excludes the last algorithm id — confirm
		 * against cryptodev.h whether "<=" is intended.
		 */
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
				feat |= 1 << kalg;
	}
	CRYPTO_DRIVER_UNLOCK();
	*featp = feat;
	return (0);
}
1261 | + | |
1262 | +/* | |
1263 | + * Crypto thread, dispatches crypto requests. | |
1264 | + */ | |
1265 | +static int | |
1266 | +crypto_proc(void *arg) | |
1267 | +{ | |
1268 | + struct cryptop *crp, *submit; | |
1269 | + struct cryptkop *krp, *krpp; | |
1270 | + struct cryptocap *cap; | |
1271 | + u_int32_t hid; | |
1272 | + int result, hint; | |
1273 | + unsigned long q_flags; | |
1274 | + int loopcount = 0; | |
1275 | + | |
1276 | + set_current_state(TASK_INTERRUPTIBLE); | |
1277 | + | |
1278 | + CRYPTO_Q_LOCK(); | |
1279 | + for (;;) { | |
1280 | + /* | |
1281 | + * we need to make sure we don't get into a busy loop with nothing | |
1282 | + * to do, the two crypto_all_*blocked vars help us find out when | |
1283 | + * we are all full and can do nothing on any driver or Q. If so we | |
1284 | + * wait for an unblock. | |
1285 | + */ | |
1286 | + crypto_all_qblocked = !list_empty(&crp_q); | |
1287 | + | |
1288 | + /* | |
1289 | + * Find the first element in the queue that can be | |
1290 | + * processed and look-ahead to see if multiple ops | |
1291 | + * are ready for the same driver. | |
1292 | + */ | |
1293 | + submit = NULL; | |
1294 | + hint = 0; | |
1295 | + list_for_each_entry(crp, &crp_q, crp_next) { | |
1296 | + hid = CRYPTO_SESID2HID(crp->crp_sid); | |
1297 | + cap = crypto_checkdriver(hid); | |
1298 | + /* | |
1299 | + * Driver cannot disappear when there is an active | |
1300 | + * session. | |
1301 | + */ | |
1302 | + KASSERT(cap != NULL, ("%s:%u Driver disappeared.", | |
1303 | + __func__, __LINE__)); | |
1304 | + if (cap == NULL || cap->cc_dev == NULL) { | |
1305 | + /* Op needs to be migrated, process it. */ | |
1306 | + if (submit == NULL) | |
1307 | + submit = crp; | |
1308 | + break; | |
1309 | + } | |
1310 | + if (!cap->cc_qblocked) { | |
1311 | + if (submit != NULL) { | |
1312 | + /* | |
1313 | + * We stop on finding another op, | |
1314 | + * regardless whether its for the same | |
1315 | + * driver or not. We could keep | |
1316 | + * searching the queue but it might be | |
1317 | + * better to just use a per-driver | |
1318 | + * queue instead. | |
1319 | + */ | |
1320 | + if (CRYPTO_SESID2HID(submit->crp_sid) == hid) | |
1321 | + hint = CRYPTO_HINT_MORE; | |
1322 | + break; | |
1323 | + } else { | |
1324 | + submit = crp; | |
1325 | + if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) | |
1326 | + break; | |
1327 | + /* keep scanning for more are q'd */ | |
1328 | + } | |
1329 | + } | |
1330 | + } | |
1331 | + if (submit != NULL) { | |
1332 | + hid = CRYPTO_SESID2HID(submit->crp_sid); | |
1333 | + crypto_all_qblocked = 0; | |
1334 | + list_del(&submit->crp_next); | |
1335 | + crypto_drivers[hid].cc_unqblocked = 1; | |
1336 | + cap = crypto_checkdriver(hid); | |
1337 | + CRYPTO_Q_UNLOCK(); | |
1338 | + KASSERT(cap != NULL, ("%s:%u Driver disappeared.", | |
1339 | + __func__, __LINE__)); | |
1340 | + result = crypto_invoke(cap, submit, hint); | |
1341 | + CRYPTO_Q_LOCK(); | |
1342 | + if (result == ERESTART) { | |
1343 | + /* | |
1344 | + * The driver ran out of resources, mark the | |
1345 | + * driver ``blocked'' for cryptop's and put | |
1346 | + * the request back in the queue. It would | |
1347 | + * best to put the request back where we got | |
1348 | + * it but that's hard so for now we put it | |
1349 | + * at the front. This should be ok; putting | |
1350 | + * it at the end does not work. | |
1351 | + */ | |
1352 | + /* XXX validate sid again? */ | |
1353 | + list_add(&submit->crp_next, &crp_q); | |
1354 | + cryptostats.cs_blocks++; | |
1355 | + if (crypto_drivers[hid].cc_unqblocked) | |
1356 | + crypto_drivers[hid].cc_qblocked=0; | |
1357 | + crypto_drivers[hid].cc_unqblocked=0; | |
1358 | + } | |
1359 | + crypto_drivers[hid].cc_unqblocked = 0; | |
1360 | + } | |
1361 | + | |
1362 | + crypto_all_kqblocked = !list_empty(&crp_kq); | |
1363 | + | |
1364 | + /* As above, but for key ops */ | |
1365 | + krp = NULL; | |
1366 | + list_for_each_entry(krpp, &crp_kq, krp_next) { | |
1367 | + cap = crypto_checkdriver(krpp->krp_hid); | |
1368 | + if (cap == NULL || cap->cc_dev == NULL) { | |
1369 | + /* | |
1370 | + * Operation needs to be migrated, invalidate | |
1371 | + * the assigned device so it will reselect a | |
1372 | + * new one below. Propagate the original | |
1373 | + * crid selection flags if supplied. | |
1374 | + */ | |
1375 | + krp->krp_hid = krp->krp_crid & | |
1376 | + (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE); | |
1377 | + if (krp->krp_hid == 0) | |
1378 | + krp->krp_hid = | |
1379 | + CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE; | |
1380 | + break; | |
1381 | + } | |
1382 | + if (!cap->cc_kqblocked) { | |
1383 | + krp = krpp; | |
1384 | + break; | |
1385 | + } | |
1386 | + } | |
1387 | + if (krp != NULL) { | |
1388 | + crypto_all_kqblocked = 0; | |
1389 | + list_del(&krp->krp_next); | |
1390 | + crypto_drivers[krp->krp_hid].cc_kqblocked = 1; | |
1391 | + CRYPTO_Q_UNLOCK(); | |
1392 | + result = crypto_kinvoke(krp, krp->krp_hid); | |
1393 | + CRYPTO_Q_LOCK(); | |
1394 | + if (result == ERESTART) { | |
1395 | + /* | |
1396 | + * The driver ran out of resources, mark the | |
1397 | + * driver ``blocked'' for cryptkop's and put | |
1398 | + * the request back in the queue. It would | |
1399 | + * best to put the request back where we got | |
1400 | + * it but that's hard so for now we put it | |
1401 | + * at the front. This should be ok; putting | |
1402 | + * it at the end does not work. | |
1403 | + */ | |
1404 | + /* XXX validate sid again? */ | |
1405 | + list_add(&krp->krp_next, &crp_kq); | |
1406 | + cryptostats.cs_kblocks++; | |
1407 | + } else | |
1408 | + crypto_drivers[krp->krp_hid].cc_kqblocked = 0; | |
1409 | + } | |
1410 | + | |
1411 | + if (submit == NULL && krp == NULL) { | |
1412 | + /* | |
1413 | + * Nothing more to be processed. Sleep until we're | |
1414 | + * woken because there are more ops to process. | |
1415 | + * This happens either by submission or by a driver | |
1416 | + * becoming unblocked and notifying us through | |
1417 | + * crypto_unblock. Note that when we wakeup we | |
1418 | + * start processing each queue again from the | |
1419 | + * front. It's not clear that it's important to | |
1420 | + * preserve this ordering since ops may finish | |
1421 | + * out of order if dispatched to different devices | |
1422 | + * and some become blocked while others do not. | |
1423 | + */ | |
1424 | + dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n", | |
1425 | + __FUNCTION__, | |
1426 | + list_empty(&crp_q), crypto_all_qblocked, | |
1427 | + list_empty(&crp_kq), crypto_all_kqblocked); | |
1428 | + loopcount = 0; | |
1429 | + CRYPTO_Q_UNLOCK(); | |
1430 | + wait_event_interruptible(cryptoproc_wait, | |
1431 | + !(list_empty(&crp_q) || crypto_all_qblocked) || | |
1432 | + !(list_empty(&crp_kq) || crypto_all_kqblocked) || | |
1433 | + kthread_should_stop()); | |
1434 | + if (signal_pending (current)) { | |
1435 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) | |
1436 | + spin_lock_irq(¤t->sigmask_lock); | |
1437 | +#endif | |
1438 | + flush_signals(current); | |
1439 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) | |
1440 | + spin_unlock_irq(¤t->sigmask_lock); | |
1441 | +#endif | |
1442 | + } | |
1443 | + CRYPTO_Q_LOCK(); | |
1444 | + dprintk("%s - awake\n", __FUNCTION__); | |
1445 | + if (kthread_should_stop()) | |
1446 | + break; | |
1447 | + cryptostats.cs_intrs++; | |
1448 | + } else if (loopcount > crypto_max_loopcount) { | |
1449 | + /* | |
1450 | + * Give other processes a chance to run if we've | |
1451 | + * been using the CPU exclusively for a while. | |
1452 | + */ | |
1453 | + loopcount = 0; | |
1454 | + CRYPTO_Q_UNLOCK(); | |
1455 | + schedule(); | |
1456 | + CRYPTO_Q_LOCK(); | |
1457 | + } | |
1458 | + loopcount++; | |
1459 | + } | |
1460 | + CRYPTO_Q_UNLOCK(); | |
1461 | + return 0; | |
1462 | +} | |
1463 | + | |
1464 | +/* | |
1465 | + * Crypto returns thread, does callbacks for processed crypto requests. | |
1466 | + * Callbacks are done here, rather than in the crypto drivers, because | |
1467 | + * callbacks typically are expensive and would slow interrupt handling. | |
1468 | + */ | |
1469 | +static int | |
1470 | +crypto_ret_proc(void *arg) | |
1471 | +{ | |
1472 | + struct cryptop *crpt; | |
1473 | + struct cryptkop *krpt; | |
1474 | + unsigned long r_flags; | |
1475 | + | |
1476 | + set_current_state(TASK_INTERRUPTIBLE); | |
1477 | + | |
1478 | + CRYPTO_RETQ_LOCK(); | |
1479 | + for (;;) { | |
1480 | + /* Harvest return q's for completed ops */ | |
1481 | + crpt = NULL; | |
1482 | + if (!list_empty(&crp_ret_q)) | |
1483 | + crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next); | |
1484 | + if (crpt != NULL) | |
1485 | + list_del(&crpt->crp_next); | |
1486 | + | |
1487 | + krpt = NULL; | |
1488 | + if (!list_empty(&crp_ret_kq)) | |
1489 | + krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next); | |
1490 | + if (krpt != NULL) | |
1491 | + list_del(&krpt->krp_next); | |
1492 | + | |
1493 | + if (crpt != NULL || krpt != NULL) { | |
1494 | + CRYPTO_RETQ_UNLOCK(); | |
1495 | + /* | |
1496 | + * Run callbacks unlocked. | |
1497 | + */ | |
1498 | + if (crpt != NULL) | |
1499 | + crpt->crp_callback(crpt); | |
1500 | + if (krpt != NULL) | |
1501 | + krpt->krp_callback(krpt); | |
1502 | + CRYPTO_RETQ_LOCK(); | |
1503 | + } else { | |
1504 | + /* | |
1505 | + * Nothing more to be processed. Sleep until we're | |
1506 | + * woken because there are more returns to process. | |
1507 | + */ | |
1508 | + dprintk("%s - sleeping\n", __FUNCTION__); | |
1509 | + CRYPTO_RETQ_UNLOCK(); | |
1510 | + wait_event_interruptible(cryptoretproc_wait, | |
1511 | + !list_empty(&crp_ret_q) || | |
1512 | + !list_empty(&crp_ret_kq) || | |
1513 | + kthread_should_stop()); | |
1514 | + if (signal_pending (current)) { | |
1515 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) | |
1516 | + spin_lock_irq(¤t->sigmask_lock); | |
1517 | +#endif | |
1518 | + flush_signals(current); | |
1519 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) | |
1520 | + spin_unlock_irq(¤t->sigmask_lock); | |
1521 | +#endif | |
1522 | + } | |
1523 | + CRYPTO_RETQ_LOCK(); | |
1524 | + dprintk("%s - awake\n", __FUNCTION__); | |
1525 | + if (kthread_should_stop()) { | |
1526 | + dprintk("%s - EXITING!\n", __FUNCTION__); | |
1527 | + break; | |
1528 | + } | |
1529 | + cryptostats.cs_rets++; | |
1530 | + } | |
1531 | + } | |
1532 | + CRYPTO_RETQ_UNLOCK(); | |
1533 | + return 0; | |
1534 | +} | |
1535 | + | |
1536 | + | |
1537 | +#if 0 /* should put this into /proc or something */ | |
1538 | +static void | |
1539 | +db_show_drivers(void) | |
1540 | +{ | |
1541 | + int hid; | |
1542 | + | |
1543 | + db_printf("%12s %4s %4s %8s %2s %2s\n" | |
1544 | + , "Device" | |
1545 | + , "Ses" | |
1546 | + , "Kops" | |
1547 | + , "Flags" | |
1548 | + , "QB" | |
1549 | + , "KB" | |
1550 | + ); | |
1551 | + for (hid = 0; hid < crypto_drivers_num; hid++) { | |
1552 | + const struct cryptocap *cap = &crypto_drivers[hid]; | |
1553 | + if (cap->cc_dev == NULL) | |
1554 | + continue; | |
1555 | + db_printf("%-12s %4u %4u %08x %2u %2u\n" | |
1556 | + , device_get_nameunit(cap->cc_dev) | |
1557 | + , cap->cc_sessions | |
1558 | + , cap->cc_koperations | |
1559 | + , cap->cc_flags | |
1560 | + , cap->cc_qblocked | |
1561 | + , cap->cc_kqblocked | |
1562 | + ); | |
1563 | + } | |
1564 | +} | |
1565 | + | |
1566 | +DB_SHOW_COMMAND(crypto, db_show_crypto) | |
1567 | +{ | |
1568 | + struct cryptop *crp; | |
1569 | + | |
1570 | + db_show_drivers(); | |
1571 | + db_printf("\n"); | |
1572 | + | |
1573 | + db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", | |
1574 | + "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", | |
1575 | + "Desc", "Callback"); | |
1576 | + TAILQ_FOREACH(crp, &crp_q, crp_next) { | |
1577 | + db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" | |
1578 | + , (int) CRYPTO_SESID2HID(crp->crp_sid) | |
1579 | + , (int) CRYPTO_SESID2CAPS(crp->crp_sid) | |
1580 | + , crp->crp_ilen, crp->crp_olen | |
1581 | + , crp->crp_etype | |
1582 | + , crp->crp_flags | |
1583 | + , crp->crp_desc | |
1584 | + , crp->crp_callback | |
1585 | + ); | |
1586 | + } | |
1587 | + if (!TAILQ_EMPTY(&crp_ret_q)) { | |
1588 | + db_printf("\n%4s %4s %4s %8s\n", | |
1589 | + "HID", "Etype", "Flags", "Callback"); | |
1590 | + TAILQ_FOREACH(crp, &crp_ret_q, crp_next) { | |
1591 | + db_printf("%4u %4u %04x %8p\n" | |
1592 | + , (int) CRYPTO_SESID2HID(crp->crp_sid) | |
1593 | + , crp->crp_etype | |
1594 | + , crp->crp_flags | |
1595 | + , crp->crp_callback | |
1596 | + ); | |
1597 | + } | |
1598 | + } | |
1599 | +} | |
1600 | + | |
1601 | +DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) | |
1602 | +{ | |
1603 | + struct cryptkop *krp; | |
1604 | + | |
1605 | + db_show_drivers(); | |
1606 | + db_printf("\n"); | |
1607 | + | |
1608 | + db_printf("%4s %5s %4s %4s %8s %4s %8s\n", | |
1609 | + "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); | |
1610 | + TAILQ_FOREACH(krp, &crp_kq, krp_next) { | |
1611 | + db_printf("%4u %5u %4u %4u %08x %4u %8p\n" | |
1612 | + , krp->krp_op | |
1613 | + , krp->krp_status | |
1614 | + , krp->krp_iparams, krp->krp_oparams | |
1615 | + , krp->krp_crid, krp->krp_hid | |
1616 | + , krp->krp_callback | |
1617 | + ); | |
1618 | + } | |
1619 | + if (!TAILQ_EMPTY(&crp_ret_q)) { | |
1620 | + db_printf("%4s %5s %8s %4s %8s\n", | |
1621 | + "Op", "Status", "CRID", "HID", "Callback"); | |
1622 | + TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) { | |
1623 | + db_printf("%4u %5u %08x %4u %8p\n" | |
1624 | + , krp->krp_op | |
1625 | + , krp->krp_status | |
1626 | + , krp->krp_crid, krp->krp_hid | |
1627 | + , krp->krp_callback | |
1628 | + ); | |
1629 | + } | |
1630 | + } | |
1631 | +} | |
1632 | +#endif | |
1633 | + | |
1634 | + | |
1635 | +static int | |
1636 | +crypto_init(void) | |
1637 | +{ | |
1638 | + int error; | |
1639 | + unsigned long cpu; | |
1640 | + | |
1641 | + dprintk("%s(%p)\n", __FUNCTION__, (void *) crypto_init); | |
1642 | + | |
1643 | + if (crypto_initted) | |
1644 | + return 0; | |
1645 | + crypto_initted = 1; | |
1646 | + | |
1647 | + spin_lock_init(&crypto_drivers_lock); | |
1648 | + spin_lock_init(&crypto_q_lock); | |
1649 | + spin_lock_init(&crypto_ret_q_lock); | |
1650 | + | |
1651 | + cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop), | |
1652 | + 0, SLAB_HWCACHE_ALIGN, NULL | |
1653 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) | |
1654 | + , NULL | |
1655 | +#endif | |
1656 | + ); | |
1657 | + | |
1658 | + cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc), | |
1659 | + 0, SLAB_HWCACHE_ALIGN, NULL | |
1660 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) | |
1661 | + , NULL | |
1662 | +#endif | |
1663 | + ); | |
1664 | + | |
1665 | + if (cryptodesc_zone == NULL || cryptop_zone == NULL) { | |
1666 | + printk("crypto: crypto_init cannot setup crypto zones\n"); | |
1667 | + error = ENOMEM; | |
1668 | + goto bad; | |
1669 | + } | |
1670 | + | |
1671 | + crypto_drivers_num = CRYPTO_DRIVERS_INITIAL; | |
1672 | + crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap), | |
1673 | + GFP_KERNEL); | |
1674 | + if (crypto_drivers == NULL) { | |
1675 | + printk("crypto: crypto_init cannot setup crypto drivers\n"); | |
1676 | + error = ENOMEM; | |
1677 | + goto bad; | |
1678 | + } | |
1679 | + | |
1680 | + memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap)); | |
1681 | + | |
1682 | + ocf_for_each_cpu(cpu) { | |
1683 | + cryptoproc[cpu] = kthread_create(crypto_proc, (void *) cpu, | |
1684 | + "ocf_%d", (int) cpu); | |
1685 | + if (IS_ERR(cryptoproc[cpu])) { | |
1686 | + error = PTR_ERR(cryptoproc[cpu]); | |
1687 | + printk("crypto: crypto_init cannot start crypto thread; error %d", | |
1688 | + error); | |
1689 | + goto bad; | |
1690 | + } | |
1691 | + kthread_bind(cryptoproc[cpu], cpu); | |
1692 | + wake_up_process(cryptoproc[cpu]); | |
1693 | + | |
1694 | + cryptoretproc[cpu] = kthread_create(crypto_ret_proc, (void *) cpu, | |
1695 | + "ocf_ret_%d", (int) cpu); | |
1696 | + if (IS_ERR(cryptoretproc[cpu])) { | |
1697 | + error = PTR_ERR(cryptoretproc[cpu]); | |
1698 | + printk("crypto: crypto_init cannot start cryptoret thread; error %d", | |
1699 | + error); | |
1700 | + goto bad; | |
1701 | + } | |
1702 | + kthread_bind(cryptoretproc[cpu], cpu); | |
1703 | + wake_up_process(cryptoretproc[cpu]); | |
1704 | + } | |
1705 | + | |
1706 | + return 0; | |
1707 | +bad: | |
1708 | + crypto_exit(); | |
1709 | + return error; | |
1710 | +} | |
1711 | + | |
1712 | + | |
1713 | +static void | |
1714 | +crypto_exit(void) | |
1715 | +{ | |
1716 | + int cpu; | |
1717 | + | |
1718 | + dprintk("%s()\n", __FUNCTION__); | |
1719 | + | |
1720 | + /* | |
1721 | + * Terminate any crypto threads. | |
1722 | + */ | |
1723 | + ocf_for_each_cpu(cpu) { | |
1724 | + kthread_stop(cryptoproc[cpu]); | |
1725 | + kthread_stop(cryptoretproc[cpu]); | |
1726 | + } | |
1727 | + | |
1728 | + /* | |
1729 | + * Reclaim dynamically allocated resources. | |
1730 | + */ | |
1731 | + if (crypto_drivers != NULL) | |
1732 | + kfree(crypto_drivers); | |
1733 | + | |
1734 | + if (cryptodesc_zone != NULL) | |
1735 | + kmem_cache_destroy(cryptodesc_zone); | |
1736 | + if (cryptop_zone != NULL) | |
1737 | + kmem_cache_destroy(cryptop_zone); | |
1738 | +} | |
1739 | + | |
1740 | + | |
1741 | +EXPORT_SYMBOL(crypto_newsession); | |
1742 | +EXPORT_SYMBOL(crypto_freesession); | |
1743 | +EXPORT_SYMBOL(crypto_get_driverid); | |
1744 | +EXPORT_SYMBOL(crypto_kregister); | |
1745 | +EXPORT_SYMBOL(crypto_register); | |
1746 | +EXPORT_SYMBOL(crypto_unregister); | |
1747 | +EXPORT_SYMBOL(crypto_unregister_all); | |
1748 | +EXPORT_SYMBOL(crypto_unblock); | |
1749 | +EXPORT_SYMBOL(crypto_dispatch); | |
1750 | +EXPORT_SYMBOL(crypto_kdispatch); | |
1751 | +EXPORT_SYMBOL(crypto_freereq); | |
1752 | +EXPORT_SYMBOL(crypto_getreq); | |
1753 | +EXPORT_SYMBOL(crypto_done); | |
1754 | +EXPORT_SYMBOL(crypto_kdone); | |
1755 | +EXPORT_SYMBOL(crypto_getfeat); | |
1756 | +EXPORT_SYMBOL(crypto_userasymcrypto); | |
1757 | +EXPORT_SYMBOL(crypto_getcaps); | |
1758 | +EXPORT_SYMBOL(crypto_find_driver); | |
1759 | +EXPORT_SYMBOL(crypto_find_device_byhid); | |
1760 | + | |
1761 | +module_init(crypto_init); | |
1762 | +module_exit(crypto_exit); | |
1763 | + | |
1764 | +MODULE_LICENSE("BSD"); | |
1765 | +MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>"); | |
1766 | +MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)"); |
crypto/ocf/cryptodev.c
Changes suppressed. Click to show
1 | +/* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */ | |
2 | + | |
3 | +/*- | |
4 | + * Linux port done by David McCullough <david_mccullough@mcafee.com> | |
5 | + * Copyright (C) 2006-2010 David McCullough | |
6 | + * Copyright (C) 2004-2005 Intel Corporation. | |
7 | + * The license and original author are listed below. | |
8 | + * | |
9 | + * Copyright (c) 2001 Theo de Raadt | |
10 | + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting | |
11 | + * | |
12 | + * Redistribution and use in source and binary forms, with or without | |
13 | + * modification, are permitted provided that the following conditions | |
14 | + * are met: | |
15 | + * | |
16 | + * 1. Redistributions of source code must retain the above copyright | |
17 | + * notice, this list of conditions and the following disclaimer. | |
18 | + * 2. Redistributions in binary form must reproduce the above copyright | |
19 | + * notice, this list of conditions and the following disclaimer in the | |
20 | + * documentation and/or other materials provided with the distribution. | |
21 | + * 3. The name of the author may not be used to endorse or promote products | |
22 | + * derived from this software without specific prior written permission. | |
23 | + * | |
24 | + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
25 | + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
26 | + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
27 | + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
28 | + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
29 | + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
30 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
31 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
32 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
33 | + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
34 | + * | |
35 | + * Effort sponsored in part by the Defense Advanced Research Projects | |
36 | + * Agency (DARPA) and Air Force Research Laboratory, Air Force | |
37 | + * Materiel Command, USAF, under agreement number F30602-01-2-0537. | |
38 | + * | |
39 | +__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $"); | |
40 | + */ | |
41 | + | |
42 | +#include <linux/version.h> | |
43 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
44 | +#include <linux/config.h> | |
45 | +#endif | |
46 | +#include <linux/types.h> | |
47 | +#include <linux/time.h> | |
48 | +#include <linux/delay.h> | |
49 | +#include <linux/list.h> | |
50 | +#include <linux/init.h> | |
51 | +#include <linux/sched.h> | |
52 | +#include <linux/unistd.h> | |
53 | +#include <linux/module.h> | |
54 | +#include <linux/wait.h> | |
55 | +#include <linux/slab.h> | |
56 | +#include <linux/fs.h> | |
57 | +#include <linux/dcache.h> | |
58 | +#include <linux/file.h> | |
59 | +#include <linux/mount.h> | |
60 | +#include <linux/miscdevice.h> | |
61 | +#include <asm/uaccess.h> | |
62 | + | |
63 | +#include <cryptodev.h> | |
64 | +#include <uio.h> | |
65 | + | |
66 | +extern asmlinkage long sys_dup(unsigned int fildes); | |
67 | + | |
68 | +#define debug cryptodev_debug | |
69 | +int cryptodev_debug = 0; | |
70 | +module_param(cryptodev_debug, int, 0644); | |
71 | +MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug"); | |
72 | + | |
73 | +struct csession_info { | |
74 | + u_int16_t blocksize; | |
75 | + u_int16_t minkey, maxkey; | |
76 | + | |
77 | + u_int16_t keysize; | |
78 | + /* u_int16_t hashsize; */ | |
79 | + u_int16_t authsize; | |
80 | + u_int16_t authkey; | |
81 | + /* u_int16_t ctxsize; */ | |
82 | +}; | |
83 | + | |
84 | +struct csession { | |
85 | + struct list_head list; | |
86 | + u_int64_t sid; | |
87 | + u_int32_t ses; | |
88 | + | |
89 | + wait_queue_head_t waitq; | |
90 | + | |
91 | + u_int32_t cipher; | |
92 | + | |
93 | + u_int32_t mac; | |
94 | + | |
95 | + caddr_t key; | |
96 | + int keylen; | |
97 | + u_char tmp_iv[EALG_MAX_BLOCK_LEN]; | |
98 | + | |
99 | + caddr_t mackey; | |
100 | + int mackeylen; | |
101 | + | |
102 | + struct csession_info info; | |
103 | + | |
104 | + struct iovec iovec; | |
105 | + struct uio uio; | |
106 | + int error; | |
107 | +}; | |
108 | + | |
109 | +struct fcrypt { | |
110 | + struct list_head csessions; | |
111 | + int sesn; | |
112 | +}; | |
113 | + | |
114 | +static struct csession *csefind(struct fcrypt *, u_int); | |
115 | +static int csedelete(struct fcrypt *, struct csession *); | |
116 | +static struct csession *cseadd(struct fcrypt *, struct csession *); | |
117 | +static struct csession *csecreate(struct fcrypt *, u_int64_t, | |
118 | + struct cryptoini *crie, struct cryptoini *cria, struct csession_info *); | |
119 | +static int csefree(struct csession *); | |
120 | + | |
121 | +static int cryptodev_op(struct csession *, struct crypt_op *); | |
122 | +static int cryptodev_key(struct crypt_kop *); | |
123 | +static int cryptodev_find(struct crypt_find_op *); | |
124 | + | |
125 | +static int cryptodev_cb(void *); | |
126 | +static int cryptodev_open(struct inode *inode, struct file *filp); | |
127 | + | |
128 | +/* | |
129 | + * Check a crypto identifier to see if it requested | |
130 | + * a valid crid and it's capabilities match. | |
131 | + */ | |
132 | +static int | |
133 | +checkcrid(int crid) | |
134 | +{ | |
135 | + int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE); | |
136 | + int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE); | |
137 | + int caps = 0; | |
138 | + | |
139 | + /* if the user hasn't selected a driver, then just call newsession */ | |
140 | + if (hid == 0 && typ != 0) | |
141 | + return 0; | |
142 | + | |
143 | + caps = crypto_getcaps(hid); | |
144 | + | |
145 | + /* didn't find anything with capabilities */ | |
146 | + if (caps == 0) { | |
147 | + dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ); | |
148 | + return EINVAL; | |
149 | + } | |
150 | + | |
151 | + /* the user didn't specify SW or HW, so the driver is ok */ | |
152 | + if (typ == 0) | |
153 | + return 0; | |
154 | + | |
155 | + /* if the type specified didn't match */ | |
156 | + if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) { | |
157 | + dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__, | |
158 | + hid, typ, caps); | |
159 | + return EINVAL; | |
160 | + } | |
161 | + | |
162 | + return 0; | |
163 | +} | |
164 | + | |
165 | +static int | |
166 | +cryptodev_op(struct csession *cse, struct crypt_op *cop) | |
167 | +{ | |
168 | + struct cryptop *crp = NULL; | |
169 | + struct cryptodesc *crde = NULL, *crda = NULL; | |
170 | + int error = 0; | |
171 | + | |
172 | + dprintk("%s()\n", __FUNCTION__); | |
173 | + if (cop->len > CRYPTO_MAX_DATA_LEN) { | |
174 | + dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN); | |
175 | + return (E2BIG); | |
176 | + } | |
177 | + | |
178 | + if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) { | |
179 | + dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize, | |
180 | + cop->len); | |
181 | + return (EINVAL); | |
182 | + } | |
183 | + | |
184 | + cse->uio.uio_iov = &cse->iovec; | |
185 | + cse->uio.uio_iovcnt = 1; | |
186 | + cse->uio.uio_offset = 0; | |
187 | +#if 0 | |
188 | + cse->uio.uio_resid = cop->len; | |
189 | + cse->uio.uio_segflg = UIO_SYSSPACE; | |
190 | + cse->uio.uio_rw = UIO_WRITE; | |
191 | + cse->uio.uio_td = td; | |
192 | +#endif | |
193 | + cse->uio.uio_iov[0].iov_len = cop->len; | |
194 | + if (cse->info.authsize) | |
195 | + cse->uio.uio_iov[0].iov_len += cse->info.authsize; | |
196 | + cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len, | |
197 | + GFP_KERNEL); | |
198 | + | |
199 | + if (cse->uio.uio_iov[0].iov_base == NULL) { | |
200 | + dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__, | |
201 | + (int)cse->uio.uio_iov[0].iov_len); | |
202 | + return (ENOMEM); | |
203 | + } | |
204 | + | |
205 | + crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0)); | |
206 | + if (crp == NULL) { | |
207 | + dprintk("%s: ENOMEM\n", __FUNCTION__); | |
208 | + error = ENOMEM; | |
209 | + goto bail; | |
210 | + } | |
211 | + | |
212 | + if (cse->info.authsize && cse->info.blocksize) { | |
213 | + if (cop->op == COP_ENCRYPT) { | |
214 | + crde = crp->crp_desc; | |
215 | + crda = crde->crd_next; | |
216 | + } else { | |
217 | + crda = crp->crp_desc; | |
218 | + crde = crda->crd_next; | |
219 | + } | |
220 | + } else if (cse->info.authsize) { | |
221 | + crda = crp->crp_desc; | |
222 | + } else if (cse->info.blocksize) { | |
223 | + crde = crp->crp_desc; | |
224 | + } else { | |
225 | + dprintk("%s: bad request\n", __FUNCTION__); | |
226 | + error = EINVAL; | |
227 | + goto bail; | |
228 | + } | |
229 | + | |
230 | + if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src, | |
231 | + cop->len))) { | |
232 | + dprintk("%s: bad copy\n", __FUNCTION__); | |
233 | + goto bail; | |
234 | + } | |
235 | + | |
236 | + if (crda) { | |
237 | + crda->crd_skip = 0; | |
238 | + crda->crd_len = cop->len; | |
239 | + crda->crd_inject = cop->len; | |
240 | + | |
241 | + crda->crd_alg = cse->mac; | |
242 | + crda->crd_key = cse->mackey; | |
243 | + crda->crd_klen = cse->mackeylen * 8; | |
244 | + } | |
245 | + | |
246 | + if (crde) { | |
247 | + if (cop->op == COP_ENCRYPT) | |
248 | + crde->crd_flags |= CRD_F_ENCRYPT; | |
249 | + else | |
250 | + crde->crd_flags &= ~CRD_F_ENCRYPT; | |
251 | + crde->crd_len = cop->len; | |
252 | + crde->crd_inject = 0; | |
253 | + | |
254 | + crde->crd_alg = cse->cipher; | |
255 | + crde->crd_key = cse->key; | |
256 | + crde->crd_klen = cse->keylen * 8; | |
257 | + } | |
258 | + | |
259 | + crp->crp_ilen = cse->uio.uio_iov[0].iov_len; | |
260 | + crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM | |
261 | + | (cop->flags & COP_F_BATCH); | |
262 | + crp->crp_buf = (caddr_t)&cse->uio; | |
263 | + crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb; | |
264 | + crp->crp_sid = cse->sid; | |
265 | + crp->crp_opaque = (void *)cse; | |
266 | + | |
267 | + if (cop->iv) { | |
268 | + if (crde == NULL) { | |
269 | + error = EINVAL; | |
270 | + dprintk("%s no crde\n", __FUNCTION__); | |
271 | + goto bail; | |
272 | + } | |
273 | + if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */ | |
274 | + error = EINVAL; | |
275 | + dprintk("%s arc4 with IV\n", __FUNCTION__); | |
276 | + goto bail; | |
277 | + } | |
278 | + if ((error = copy_from_user(cse->tmp_iv, cop->iv, | |
279 | + cse->info.blocksize))) { | |
280 | + dprintk("%s bad iv copy\n", __FUNCTION__); | |
281 | + goto bail; | |
282 | + } | |
283 | + memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize); | |
284 | + crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; | |
285 | + crde->crd_skip = 0; | |
286 | + } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */ | |
287 | + crde->crd_skip = 0; | |
288 | + } else if (crde) { | |
289 | + crde->crd_flags |= CRD_F_IV_PRESENT; | |
290 | + crde->crd_skip = cse->info.blocksize; | |
291 | + crde->crd_len -= cse->info.blocksize; | |
292 | + } | |
293 | + | |
294 | + if (cop->mac && crda == NULL) { | |
295 | + error = EINVAL; | |
296 | + dprintk("%s no crda\n", __FUNCTION__); | |
297 | + goto bail; | |
298 | + } | |
299 | + | |
300 | + /* | |
301 | + * Let the dispatch run unlocked, then, interlock against the | |
302 | + * callback before checking if the operation completed and going | |
303 | + * to sleep. This insures drivers don't inherit our lock which | |
304 | + * results in a lock order reversal between crypto_dispatch forced | |
305 | + * entry and the crypto_done callback into us. | |
306 | + */ | |
307 | + error = crypto_dispatch(crp); | |
308 | + if (error) { | |
309 | + dprintk("%s error in crypto_dispatch\n", __FUNCTION__); | |
310 | + goto bail; | |
311 | + } | |
312 | + | |
313 | + dprintk("%s about to WAIT\n", __FUNCTION__); | |
314 | + /* | |
315 | + * we really need to wait for driver to complete to maintain | |
316 | + * state, luckily interrupts will be remembered | |
317 | + */ | |
318 | + do { | |
319 | + error = wait_event_interruptible(crp->crp_waitq, | |
320 | + ((crp->crp_flags & CRYPTO_F_DONE) != 0)); | |
321 | + /* | |
322 | + * we can't break out of this loop or we will leave behind | |
323 | + * a huge mess, however, staying here means if your driver | |
324 | + * is broken user applications can hang and not be killed. | |
325 | + * The solution, fix your driver :-) | |
326 | + */ | |
327 | + if (error) { | |
328 | + schedule(); | |
329 | + error = 0; | |
330 | + } | |
331 | + } while ((crp->crp_flags & CRYPTO_F_DONE) == 0); | |
332 | + dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error); | |
333 | + | |
334 | + if (crp->crp_etype != 0) { | |
335 | + error = crp->crp_etype; | |
336 | + dprintk("%s error in crp processing\n", __FUNCTION__); | |
337 | + goto bail; | |
338 | + } | |
339 | + | |
340 | + if (cse->error) { | |
341 | + error = cse->error; | |
342 | + dprintk("%s error in cse processing\n", __FUNCTION__); | |
343 | + goto bail; | |
344 | + } | |
345 | + | |
346 | + if (cop->dst && (error = copy_to_user(cop->dst, | |
347 | + cse->uio.uio_iov[0].iov_base, cop->len))) { | |
348 | + dprintk("%s bad dst copy\n", __FUNCTION__); | |
349 | + goto bail; | |
350 | + } | |
351 | + | |
352 | + if (cop->mac && | |
353 | + (error=copy_to_user(cop->mac, | |
354 | + (caddr_t)cse->uio.uio_iov[0].iov_base + cop->len, | |
355 | + cse->info.authsize))) { | |
356 | + dprintk("%s bad mac copy\n", __FUNCTION__); | |
357 | + goto bail; | |
358 | + } | |
359 | + | |
360 | +bail: | |
361 | + if (crp) | |
362 | + crypto_freereq(crp); | |
363 | + if (cse->uio.uio_iov[0].iov_base) | |
364 | + kfree(cse->uio.uio_iov[0].iov_base); | |
365 | + | |
366 | + return (error); | |
367 | +} | |
368 | + | |
369 | +static int | |
370 | +cryptodev_cb(void *op) | |
371 | +{ | |
372 | + struct cryptop *crp = (struct cryptop *) op; | |
373 | + struct csession *cse = (struct csession *)crp->crp_opaque; | |
374 | + int error; | |
375 | + | |
376 | + dprintk("%s()\n", __FUNCTION__); | |
377 | + error = crp->crp_etype; | |
378 | + if (error == EAGAIN) { | |
379 | + crp->crp_flags &= ~CRYPTO_F_DONE; | |
380 | +#ifdef NOTYET | |
381 | + /* | |
382 | + * DAVIDM I am fairly sure that we should turn this into a batch | |
383 | + * request to stop bad karma/lockup, revisit | |
384 | + */ | |
385 | + crp->crp_flags |= CRYPTO_F_BATCH; | |
386 | +#endif | |
387 | + return crypto_dispatch(crp); | |
388 | + } | |
389 | + if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) { | |
390 | + cse->error = error; | |
391 | + wake_up_interruptible(&crp->crp_waitq); | |
392 | + } | |
393 | + return (0); | |
394 | +} | |
395 | + | |
396 | +static int | |
397 | +cryptodevkey_cb(void *op) | |
398 | +{ | |
399 | + struct cryptkop *krp = (struct cryptkop *) op; | |
400 | + dprintk("%s()\n", __FUNCTION__); | |
401 | + wake_up_interruptible(&krp->krp_waitq); | |
402 | + return (0); | |
403 | +} | |
404 | + | |
405 | +static int | |
406 | +cryptodev_key(struct crypt_kop *kop) | |
407 | +{ | |
408 | + struct cryptkop *krp = NULL; | |
409 | + int error = EINVAL; | |
410 | + int in, out, size, i; | |
411 | + | |
412 | + dprintk("%s()\n", __FUNCTION__); | |
413 | + if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) { | |
414 | + dprintk("%s params too big\n", __FUNCTION__); | |
415 | + return (EFBIG); | |
416 | + } | |
417 | + | |
418 | + in = kop->crk_iparams; | |
419 | + out = kop->crk_oparams; | |
420 | + switch (kop->crk_op) { | |
421 | + case CRK_MOD_EXP: | |
422 | + if (in == 3 && out == 1) | |
423 | + break; | |
424 | + return (EINVAL); | |
425 | + case CRK_MOD_EXP_CRT: | |
426 | + if (in == 6 && out == 1) | |
427 | + break; | |
428 | + return (EINVAL); | |
429 | + case CRK_DSA_SIGN: | |
430 | + if (in == 5 && out == 2) | |
431 | + break; | |
432 | + return (EINVAL); | |
433 | + case CRK_DSA_VERIFY: | |
434 | + if (in == 7 && out == 0) | |
435 | + break; | |
436 | + return (EINVAL); | |
437 | + case CRK_DH_COMPUTE_KEY: | |
438 | + if (in == 3 && out == 1) | |
439 | + break; | |
440 | + return (EINVAL); | |
441 | + default: | |
442 | + return (EINVAL); | |
443 | + } | |
444 | + | |
445 | + krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL); | |
446 | + if (!krp) | |
447 | + return (ENOMEM); | |
448 | + bzero(krp, sizeof *krp); | |
449 | + krp->krp_op = kop->crk_op; | |
450 | + krp->krp_status = kop->crk_status; | |
451 | + krp->krp_iparams = kop->crk_iparams; | |
452 | + krp->krp_oparams = kop->crk_oparams; | |
453 | + krp->krp_crid = kop->crk_crid; | |
454 | + krp->krp_status = 0; | |
455 | + krp->krp_flags = CRYPTO_KF_CBIMM; | |
456 | + krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb; | |
457 | + init_waitqueue_head(&krp->krp_waitq); | |
458 | + | |
459 | + for (i = 0; i < CRK_MAXPARAM; i++) | |
460 | + krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits; | |
461 | + for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) { | |
462 | + size = (krp->krp_param[i].crp_nbits + 7) / 8; | |
463 | + if (size == 0) | |
464 | + continue; | |
465 | + krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL); | |
466 | + if (i >= krp->krp_iparams) | |
467 | + continue; | |
468 | + error = copy_from_user(krp->krp_param[i].crp_p, | |
469 | + kop->crk_param[i].crp_p, size); | |
470 | + if (error) | |
471 | + goto fail; | |
472 | + } | |
473 | + | |
474 | + error = crypto_kdispatch(krp); | |
475 | + if (error) | |
476 | + goto fail; | |
477 | + | |
478 | + do { | |
479 | + error = wait_event_interruptible(krp->krp_waitq, | |
480 | + ((krp->krp_flags & CRYPTO_KF_DONE) != 0)); | |
481 | + /* | |
482 | + * we can't break out of this loop or we will leave behind | |
483 | + * a huge mess, however, staying here means if your driver | |
484 | + * is broken user applications can hang and not be killed. | |
485 | + * The solution, fix your driver :-) | |
486 | + */ | |
487 | + if (error) { | |
488 | + schedule(); | |
489 | + error = 0; | |
490 | + } | |
491 | + } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0); | |
492 | + | |
493 | + dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error); | |
494 | + | |
495 | + kop->crk_crid = krp->krp_crid; /* device that did the work */ | |
496 | + if (krp->krp_status != 0) { | |
497 | + error = krp->krp_status; | |
498 | + goto fail; | |
499 | + } | |
500 | + | |
501 | + for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) { | |
502 | + size = (krp->krp_param[i].crp_nbits + 7) / 8; | |
503 | + if (size == 0) | |
504 | + continue; | |
505 | + error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p, | |
506 | + size); | |
507 | + if (error) | |
508 | + goto fail; | |
509 | + } | |
510 | + | |
511 | +fail: | |
512 | + if (krp) { | |
513 | + kop->crk_status = krp->krp_status; | |
514 | + for (i = 0; i < CRK_MAXPARAM; i++) { | |
515 | + if (krp->krp_param[i].crp_p) | |
516 | + kfree(krp->krp_param[i].crp_p); | |
517 | + } | |
518 | + kfree(krp); | |
519 | + } | |
520 | + return (error); | |
521 | +} | |
522 | + | |
523 | +static int | |
524 | +cryptodev_find(struct crypt_find_op *find) | |
525 | +{ | |
526 | + device_t dev; | |
527 | + | |
528 | + if (find->crid != -1) { | |
529 | + dev = crypto_find_device_byhid(find->crid); | |
530 | + if (dev == NULL) | |
531 | + return (ENOENT); | |
532 | + strlcpy(find->name, device_get_nameunit(dev), | |
533 | + sizeof(find->name)); | |
534 | + } else { | |
535 | + find->crid = crypto_find_driver(find->name); | |
536 | + if (find->crid == -1) | |
537 | + return (ENOENT); | |
538 | + } | |
539 | + return (0); | |
540 | +} | |
541 | + | |
542 | +static struct csession * | |
543 | +csefind(struct fcrypt *fcr, u_int ses) | |
544 | +{ | |
545 | + struct csession *cse; | |
546 | + | |
547 | + dprintk("%s()\n", __FUNCTION__); | |
548 | + list_for_each_entry(cse, &fcr->csessions, list) | |
549 | + if (cse->ses == ses) | |
550 | + return (cse); | |
551 | + return (NULL); | |
552 | +} | |
553 | + | |
554 | +static int | |
555 | +csedelete(struct fcrypt *fcr, struct csession *cse_del) | |
556 | +{ | |
557 | + struct csession *cse; | |
558 | + | |
559 | + dprintk("%s()\n", __FUNCTION__); | |
560 | + list_for_each_entry(cse, &fcr->csessions, list) { | |
561 | + if (cse == cse_del) { | |
562 | + list_del(&cse->list); | |
563 | + return (1); | |
564 | + } | |
565 | + } | |
566 | + return (0); | |
567 | +} | |
568 | + | |
569 | +static struct csession * | |
570 | +cseadd(struct fcrypt *fcr, struct csession *cse) | |
571 | +{ | |
572 | + dprintk("%s()\n", __FUNCTION__); | |
573 | + list_add_tail(&cse->list, &fcr->csessions); | |
574 | + cse->ses = fcr->sesn++; | |
575 | + return (cse); | |
576 | +} | |
577 | + | |
578 | +static struct csession * | |
579 | +csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie, | |
580 | + struct cryptoini *cria, struct csession_info *info) | |
581 | +{ | |
582 | + struct csession *cse; | |
583 | + | |
584 | + dprintk("%s()\n", __FUNCTION__); | |
585 | + cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL); | |
586 | + if (cse == NULL) | |
587 | + return NULL; | |
588 | + memset(cse, 0, sizeof(struct csession)); | |
589 | + | |
590 | + INIT_LIST_HEAD(&cse->list); | |
591 | + init_waitqueue_head(&cse->waitq); | |
592 | + | |
593 | + cse->key = crie->cri_key; | |
594 | + cse->keylen = crie->cri_klen/8; | |
595 | + cse->mackey = cria->cri_key; | |
596 | + cse->mackeylen = cria->cri_klen/8; | |
597 | + cse->sid = sid; | |
598 | + cse->cipher = crie->cri_alg; | |
599 | + cse->mac = cria->cri_alg; | |
600 | + cse->info = *info; | |
601 | + cseadd(fcr, cse); | |
602 | + return (cse); | |
603 | +} | |
604 | + | |
605 | +static int | |
606 | +csefree(struct csession *cse) | |
607 | +{ | |
608 | + int error; | |
609 | + | |
610 | + dprintk("%s()\n", __FUNCTION__); | |
611 | + error = crypto_freesession(cse->sid); | |
612 | + if (cse->key) | |
613 | + kfree(cse->key); | |
614 | + if (cse->mackey) | |
615 | + kfree(cse->mackey); | |
616 | + kfree(cse); | |
617 | + return(error); | |
618 | +} | |
619 | + | |
/*
 * cryptodev_ioctl - dispatcher for all /dev/crypto ioctls.
 *
 * Multiplexes session management (CRIOGET, CIOCGSESSION/2, CIOCFSESSION),
 * symmetric operations (CIOCCRYPT), asymmetric operations (CIOCKEY/2,
 * CIOCASYMFEAT) and driver lookup (CIOCFINDDEV).
 *
 * Sign convention: "error" holds a POSITIVE errno inside this function
 * and is negated at the single exit (return (-error)).  CRIOGET and the
 * CIOCKEY EPERM path return directly instead, bypassing that negation.
 */
static int
cryptodev_ioctl(
	struct inode *inode,
	struct file *filp,
	unsigned int cmd,
	unsigned long arg)
{
	struct cryptoini cria, crie;	/* MAC / cipher session templates */
	struct fcrypt *fcr = filp->private_data;
	struct csession *cse;
	struct csession_info info;
	struct session2_op sop;		/* session_op is a prefix of this */
	struct crypt_op cop;
	struct crypt_kop kop;
	struct crypt_find_op fop;
	u_int64_t sid;
	u_int32_t ses = 0;
	int feat, fd, error = 0, crid;
	mm_segment_t fs;

	dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);

	switch (cmd) {

	case CRIOGET: {
		/*
		 * Hand back a duplicate of this file descriptor: scan the fd
		 * table for our own fd, then sys_dup() it under KERNEL_DS.
		 * NOTE(review): the fdtable scan is unlocked and put_user()'s
		 * return value is ignored — confirm acceptable for the
		 * targeted kernels.
		 */
		dprintk("%s(CRIOGET)\n", __FUNCTION__);
		fs = get_fs();
		set_fs(get_ds());
		for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
			if (files_fdtable(current->files)->fd[fd] == filp)
				break;
		fd = sys_dup(fd);
		set_fs(fs);
		put_user(fd, (int *) arg);
		return IS_ERR_VALUE(fd) ? fd : 0;
	}

#define CIOCGSESSSTR (cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
	case CIOCGSESSION:
	case CIOCGSESSION2:
		/* create a new session: cipher, MAC, or both */
		dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
		memset(&crie, 0, sizeof(crie));
		memset(&cria, 0, sizeof(cria));
		memset(&info, 0, sizeof(info));
		memset(&sop, 0, sizeof(sop));

		/* session_op is a layout prefix of session2_op, so one
		 * struct serves both commands */
		if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
					sizeof(struct session_op) : sizeof(sop))) {
			dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
			error = EFAULT;
			goto bail;
		}

		/* map the cipher id onto block size and key-length limits */
		switch (sop.cipher) {
		case 0:
			dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
			break;
		case CRYPTO_NULL_CBC:
			info.blocksize = NULL_BLOCK_LEN;
			info.minkey = NULL_MIN_KEY_LEN;
			info.maxkey = NULL_MAX_KEY_LEN;
			break;
		case CRYPTO_DES_CBC:
			info.blocksize = DES_BLOCK_LEN;
			info.minkey = DES_MIN_KEY_LEN;
			info.maxkey = DES_MAX_KEY_LEN;
			break;
		case CRYPTO_3DES_CBC:
			info.blocksize = DES3_BLOCK_LEN;
			info.minkey = DES3_MIN_KEY_LEN;
			info.maxkey = DES3_MAX_KEY_LEN;
			break;
		case CRYPTO_BLF_CBC:
			info.blocksize = BLOWFISH_BLOCK_LEN;
			info.minkey = BLOWFISH_MIN_KEY_LEN;
			info.maxkey = BLOWFISH_MAX_KEY_LEN;
			break;
		case CRYPTO_CAST_CBC:
			info.blocksize = CAST128_BLOCK_LEN;
			info.minkey = CAST128_MIN_KEY_LEN;
			info.maxkey = CAST128_MAX_KEY_LEN;
			break;
		case CRYPTO_SKIPJACK_CBC:
			info.blocksize = SKIPJACK_BLOCK_LEN;
			info.minkey = SKIPJACK_MIN_KEY_LEN;
			info.maxkey = SKIPJACK_MAX_KEY_LEN;
			break;
		case CRYPTO_AES_CBC:
			info.blocksize = AES_BLOCK_LEN;
			info.minkey = AES_MIN_KEY_LEN;
			info.maxkey = AES_MAX_KEY_LEN;
			break;
		case CRYPTO_ARC4:
			info.blocksize = ARC4_BLOCK_LEN;
			info.minkey = ARC4_MIN_KEY_LEN;
			info.maxkey = ARC4_MAX_KEY_LEN;
			break;
		case CRYPTO_CAMELLIA_CBC:
			info.blocksize = CAMELLIA_BLOCK_LEN;
			info.minkey = CAMELLIA_MIN_KEY_LEN;
			info.maxkey = CAMELLIA_MAX_KEY_LEN;
			break;
		default:
			dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
			error = EINVAL;
			goto bail;
		}

		/* map the MAC id onto digest size and (for HMACs) key size */
		switch (sop.mac) {
		case 0:
			dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
			break;
		case CRYPTO_NULL_HMAC:
			info.authsize = NULL_HASH_LEN;
			break;
		case CRYPTO_MD5:
			info.authsize = MD5_HASH_LEN;
			break;
		case CRYPTO_SHA1:
			info.authsize = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256:
			info.authsize = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384:
			info.authsize = SHA2_384_HASH_LEN;
			break;
		case CRYPTO_SHA2_512:
			info.authsize = SHA2_512_HASH_LEN;
			break;
		case CRYPTO_RIPEMD160:
			info.authsize = RIPEMD160_HASH_LEN;
			break;
		case CRYPTO_MD5_HMAC:
			info.authsize = MD5_HASH_LEN;
			info.authkey = 16;
			break;
		case CRYPTO_SHA1_HMAC:
			info.authsize = SHA1_HASH_LEN;
			info.authkey = 20;
			break;
		case CRYPTO_SHA2_256_HMAC:
			info.authsize = SHA2_256_HASH_LEN;
			info.authkey = 32;
			break;
		case CRYPTO_SHA2_384_HMAC:
			info.authsize = SHA2_384_HASH_LEN;
			info.authkey = 48;
			break;
		case CRYPTO_SHA2_512_HMAC:
			info.authsize = SHA2_512_HASH_LEN;
			info.authkey = 64;
			break;
		case CRYPTO_RIPEMD160_HMAC:
			info.authsize = RIPEMD160_HASH_LEN;
			info.authkey = 20;
			break;
		default:
			dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
			error = EINVAL;
			goto bail;
		}

		if (info.blocksize) {
			/* a cipher was requested: validate and pull in its key */
			crie.cri_alg = sop.cipher;
			crie.cri_klen = sop.keylen * 8;
			if ((info.maxkey && sop.keylen > info.maxkey) ||
					sop.keylen < info.minkey) {
				dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
				error = EINVAL;
				goto bail;
			}

			/* NOTE(review): kmalloc() result is not NULL-checked
			 * before copy_from_user() writes through it — fix
			 * candidate (NULL deref on OOM). */
			crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
			if (copy_from_user(crie.cri_key, sop.key,
							crie.cri_klen/8)) {
				dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
				error = EFAULT;
				goto bail;
			}
			if (info.authsize)
				crie.cri_next = &cria;
		}

		if (info.authsize) {
			/* a MAC was requested: validate and pull in its key */
			cria.cri_alg = sop.mac;
			cria.cri_klen = sop.mackeylen * 8;
			if (info.authkey && sop.mackeylen != info.authkey) {
				dprintk("%s(%s) - mackeylen %d != %d\n", __FUNCTION__,
						CIOCGSESSSTR, sop.mackeylen, info.authkey);
				error = EINVAL;
				goto bail;
			}

			if (cria.cri_klen) {
				/* NOTE(review): same unchecked kmalloc() as above */
				cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
				if (copy_from_user(cria.cri_key, sop.mackey,
								cria.cri_klen / 8)) {
					dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
					error = EFAULT;
					goto bail;
				}
			}
		}

		/* NB: CIOGSESSION2 has the crid */
		if (cmd == CIOCGSESSION2) {
			crid = sop.crid;
			error = checkcrid(crid);
			if (error) {
				dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
						CIOCGSESSSTR, error);
				goto bail;
			}
		} else {
			/* allow either HW or SW to be used */
			crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
		}
		error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
		if (error) {
			dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
			goto bail;
		}

		/* csecreate() takes over the key buffers on success */
		cse = csecreate(fcr, sid, &crie, &cria, &info);
		if (cse == NULL) {
			crypto_freesession(sid);
			error = EINVAL;
			dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
			goto bail;
		}
		sop.ses = cse->ses;

		if (cmd == CIOCGSESSION2) {
			/* return hardware/driver id */
			sop.crid = CRYPTO_SESID2HID(cse->sid);
		}

		if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
					sizeof(struct session_op) : sizeof(sop))) {
			dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
			error = EFAULT;
		}
bail:
		/*
		 * NOTE(review): "bail" is also the goto target of error paths
		 * in the CIOCCRYPT/CIOCKEY/CIOCFINDDEV cases below, where
		 * crie/cria were never initialized — the kfree()s here would
		 * then read indeterminate pointers.  Fix candidate.
		 */
		if (error) {
			dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
			if (crie.cri_key)
				kfree(crie.cri_key);
			if (cria.cri_key)
				kfree(cria.cri_key);
		}
		break;
	case CIOCFSESSION:
		/* tear down one session identified by its number */
		dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
		get_user(ses, (uint32_t*)arg);	/* NOTE(review): return value ignored */
		cse = csefind(fcr, ses);
		if (cse == NULL) {
			error = EINVAL;
			dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
			break;
		}
		csedelete(fcr, cse);
		error = csefree(cse);
		break;
	case CIOCCRYPT:
		/* perform one symmetric cipher/MAC operation */
		dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
		if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
			dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
			error = EFAULT;
			goto bail;
		}
		cse = csefind(fcr, cop.ses);
		if (cse == NULL) {
			error = EINVAL;
			dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
			break;
		}
		error = cryptodev_op(cse, &cop);
		if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
			dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
			error = EFAULT;
			goto bail;
		}
		break;
	case CIOCKEY:
	case CIOCKEY2:
		/* asymmetric (public key) operation via cryptodev_key() */
		dprintk("%s(CIOCKEY)\n", __FUNCTION__);
		if (!crypto_userasymcrypto)
			/* NOTE(review): positive EPERM returned directly,
			 * bypassing the negation at the bottom — confirm. */
			return (EPERM);		/* XXX compat? */
		if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
			dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
			error = EFAULT;
			goto bail;
		}
		if (cmd == CIOCKEY) {
			/* NB: crypto core enforces s/w driver use */
			kop.crk_crid =
				CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
		}
		error = cryptodev_key(&kop);
		if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
			dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
			error = EFAULT;
			goto bail;
		}
		break;
	case CIOCASYMFEAT:
		/* report which asymmetric operations are available */
		dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
		if (!crypto_userasymcrypto) {
			/*
			 * NB: if user asym crypto operations are
			 * not permitted return "no algorithms"
			 * so well-behaved applications will just
			 * fallback to doing them in software.
			 */
			feat = 0;
		} else
			error = crypto_getfeat(&feat);
		if (!error) {
			/* NOTE(review): copy_to_user returns a residual byte
			 * count, not an errno — "error" gets that count here.
			 * Fix candidate (should map to EFAULT). */
			error = copy_to_user((void*)arg, &feat, sizeof(feat));
		}
		break;
	case CIOCFINDDEV:
		/* translate between driver name and driver id */
		if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
			dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
			error = EFAULT;
			goto bail;
		}
		error = cryptodev_find(&fop);
		if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
			dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
			error = EFAULT;
			goto bail;
		}
		break;
	default:
		dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
		error = EINVAL;
		break;
	}
	return(-error);
}
962 | + | |
#ifdef HAVE_UNLOCKED_IOCTL
/*
 * BKL-free ioctl entry point (unlocked_ioctl): forwards to the legacy
 * handler with a NULL inode, which cryptodev_ioctl() never dereferences.
 */
static long
cryptodev_unlocked_ioctl(
	struct file *filp,
	unsigned int cmd,
	unsigned long arg)
{
	return cryptodev_ioctl(NULL, filp, cmd, arg);
}
#endif
973 | + | |
974 | +static int | |
975 | +cryptodev_open(struct inode *inode, struct file *filp) | |
976 | +{ | |
977 | + struct fcrypt *fcr; | |
978 | + | |
979 | + dprintk("%s()\n", __FUNCTION__); | |
980 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) | |
981 | + /* | |
982 | + * on 2.6.35 private_data points to a miscdevice structure, we override | |
983 | + * it, which is currently safe to do. | |
984 | + */ | |
985 | + if (filp->private_data) { | |
986 | + printk("cryptodev: Private data already exists - %p!\n", filp->private_data); | |
987 | + return(-ENODEV); | |
988 | + } | |
989 | +#endif | |
990 | + | |
991 | + fcr = kmalloc(sizeof(*fcr), GFP_KERNEL); | |
992 | + if (!fcr) { | |
993 | + dprintk("%s() - malloc failed\n", __FUNCTION__); | |
994 | + return(-ENOMEM); | |
995 | + } | |
996 | + memset(fcr, 0, sizeof(*fcr)); | |
997 | + | |
998 | + INIT_LIST_HEAD(&fcr->csessions); | |
999 | + filp->private_data = fcr; | |
1000 | + return(0); | |
1001 | +} | |
1002 | + | |
1003 | +static int | |
1004 | +cryptodev_release(struct inode *inode, struct file *filp) | |
1005 | +{ | |
1006 | + struct fcrypt *fcr = filp->private_data; | |
1007 | + struct csession *cse, *tmp; | |
1008 | + | |
1009 | + dprintk("%s()\n", __FUNCTION__); | |
1010 | + if (!filp) { | |
1011 | + printk("cryptodev: No private data on release\n"); | |
1012 | + return(0); | |
1013 | + } | |
1014 | + | |
1015 | + list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) { | |
1016 | + list_del(&cse->list); | |
1017 | + (void)csefree(cse); | |
1018 | + } | |
1019 | + filp->private_data = NULL; | |
1020 | + kfree(fcr); | |
1021 | + return(0); | |
1022 | +} | |
1023 | + | |
/*
 * File operations for /dev/crypto.  The legacy .ioctl slot exists only
 * before 2.6.36 (it was removed from struct file_operations then); the
 * BKL-free .unlocked_ioctl is used when HAVE_UNLOCKED_IOCTL is defined.
 */
static struct file_operations cryptodev_fops = {
	.owner = THIS_MODULE,
	.open = cryptodev_open,
	.release = cryptodev_release,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
	.ioctl = cryptodev_ioctl,
#endif
#ifdef HAVE_UNLOCKED_IOCTL
	.unlocked_ioctl = cryptodev_unlocked_ioctl,
#endif
};
1035 | + | |
/* Misc-device descriptor: registered by cryptodev_init() so the driver
 * appears as /dev/crypto with a fixed minor (CRYPTODEV_MINOR). */
static struct miscdevice cryptodev = {
	.minor = CRYPTODEV_MINOR,
	.name = "crypto",
	.fops = &cryptodev_fops,
};
1041 | + | |
1042 | +static int __init | |
1043 | +cryptodev_init(void) | |
1044 | +{ | |
1045 | + int rc; | |
1046 | + | |
1047 | + dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init); | |
1048 | + rc = misc_register(&cryptodev); | |
1049 | + if (rc) { | |
1050 | + printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n"); | |
1051 | + return(rc); | |
1052 | + } | |
1053 | + | |
1054 | + return(0); | |
1055 | +} | |
1056 | + | |
/*
 * Module unload: remove the /dev/crypto misc device registered by
 * cryptodev_init().
 */
static void __exit
cryptodev_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);
	misc_deregister(&cryptodev);
}
1063 | + | |
module_init(cryptodev_init);
module_exit(cryptodev_exit);

/* NOTE(review): plain "BSD" is not on the kernel's GPL-compatible license
 * list ("Dual BSD/GPL" is) — confirm the intended taint behavior. */
MODULE_LICENSE("BSD");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
crypto/ocf/cryptodev.h
1 | +/* $FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $ */ | |
2 | +/* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */ | |
3 | + | |
4 | +/*- | |
5 | + * Linux port done by David McCullough <david_mccullough@mcafee.com> | |
6 | + * Copyright (C) 2006-2010 David McCullough | |
7 | + * Copyright (C) 2004-2005 Intel Corporation. | |
8 | + * The license and original author are listed below. | |
9 | + * | |
10 | + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) | |
11 | + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting | |
12 | + * | |
13 | + * This code was written by Angelos D. Keromytis in Athens, Greece, in | |
14 | + * February 2000. Network Security Technologies Inc. (NSTI) kindly | |
15 | + * supported the development of this code. | |
16 | + * | |
17 | + * Copyright (c) 2000 Angelos D. Keromytis | |
18 | + * | |
19 | + * Permission to use, copy, and modify this software with or without fee | |
20 | + * is hereby granted, provided that this entire notice is included in | |
21 | + * all source code copies of any software which is or includes a copy or | |
22 | + * modification of this software. | |
23 | + * | |
24 | + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR | |
25 | + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY | |
26 | + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE | |
27 | + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR | |
28 | + * PURPOSE. | |
29 | + * | |
30 | + * Copyright (c) 2001 Theo de Raadt | |
31 | + * | |
32 | + * Redistribution and use in source and binary forms, with or without | |
33 | + * modification, are permitted provided that the following conditions | |
34 | + * are met: | |
35 | + * | |
36 | + * 1. Redistributions of source code must retain the above copyright | |
37 | + * notice, this list of conditions and the following disclaimer. | |
38 | + * 2. Redistributions in binary form must reproduce the above copyright | |
39 | + * notice, this list of conditions and the following disclaimer in the | |
40 | + * documentation and/or other materials provided with the distribution. | |
41 | + * 3. The name of the author may not be used to endorse or promote products | |
42 | + * derived from this software without specific prior written permission. | |
43 | + * | |
44 | + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
45 | + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
46 | + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
47 | + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
48 | + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
49 | + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
50 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
51 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
52 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
53 | + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
54 | + * | |
55 | + * Effort sponsored in part by the Defense Advanced Research Projects | |
56 | + * Agency (DARPA) and Air Force Research Laboratory, Air Force | |
57 | + * Materiel Command, USAF, under agreement number F30602-01-2-0537. | |
58 | + * | |
59 | + */ | |
60 | + | |
61 | +#ifndef _CRYPTO_CRYPTO_H_ | |
62 | +#define _CRYPTO_CRYPTO_H_ | |
63 | + | |
64 | +/* Some initial values */ | |
65 | +#define CRYPTO_DRIVERS_INITIAL 4 | |
66 | +#define CRYPTO_SW_SESSIONS 32 | |
67 | + | |
68 | +/* Hash values */ | |
69 | +#define NULL_HASH_LEN 0 | |
70 | +#define MD5_HASH_LEN 16 | |
71 | +#define SHA1_HASH_LEN 20 | |
72 | +#define RIPEMD160_HASH_LEN 20 | |
73 | +#define SHA2_256_HASH_LEN 32 | |
74 | +#define SHA2_384_HASH_LEN 48 | |
75 | +#define SHA2_512_HASH_LEN 64 | |
76 | +#define MD5_KPDK_HASH_LEN 16 | |
77 | +#define SHA1_KPDK_HASH_LEN 20 | |
78 | +/* Maximum hash algorithm result length */ | |
79 | +#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */ | |
80 | + | |
81 | +/* HMAC values */ | |
82 | +#define NULL_HMAC_BLOCK_LEN 1 | |
83 | +#define MD5_HMAC_BLOCK_LEN 64 | |
84 | +#define SHA1_HMAC_BLOCK_LEN 64 | |
85 | +#define RIPEMD160_HMAC_BLOCK_LEN 64 | |
86 | +#define SHA2_256_HMAC_BLOCK_LEN 64 | |
87 | +#define SHA2_384_HMAC_BLOCK_LEN 128 | |
88 | +#define SHA2_512_HMAC_BLOCK_LEN 128 | |
89 | +/* Maximum HMAC block length */ | |
90 | +#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */ | |
91 | +#define HMAC_IPAD_VAL 0x36 | |
92 | +#define HMAC_OPAD_VAL 0x5C | |
93 | + | |
94 | +/* Encryption algorithm block sizes */ | |
95 | +#define NULL_BLOCK_LEN 1 | |
96 | +#define DES_BLOCK_LEN 8 | |
97 | +#define DES3_BLOCK_LEN 8 | |
98 | +#define BLOWFISH_BLOCK_LEN 8 | |
99 | +#define SKIPJACK_BLOCK_LEN 8 | |
100 | +#define CAST128_BLOCK_LEN 8 | |
101 | +#define RIJNDAEL128_BLOCK_LEN 16 | |
102 | +#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN | |
103 | +#define CAMELLIA_BLOCK_LEN 16 | |
104 | +#define ARC4_BLOCK_LEN 1 | |
105 | +#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */ | |
106 | + | |
107 | +/* Encryption algorithm min and max key sizes */ | |
108 | +#define NULL_MIN_KEY_LEN 0 | |
109 | +#define NULL_MAX_KEY_LEN 0 | |
110 | +#define DES_MIN_KEY_LEN 8 | |
111 | +#define DES_MAX_KEY_LEN 8 | |
112 | +#define DES3_MIN_KEY_LEN 24 | |
113 | +#define DES3_MAX_KEY_LEN 24 | |
114 | +#define BLOWFISH_MIN_KEY_LEN 4 | |
115 | +#define BLOWFISH_MAX_KEY_LEN 56 | |
116 | +#define SKIPJACK_MIN_KEY_LEN 10 | |
117 | +#define SKIPJACK_MAX_KEY_LEN 10 | |
118 | +#define CAST128_MIN_KEY_LEN 5 | |
119 | +#define CAST128_MAX_KEY_LEN 16 | |
120 | +#define RIJNDAEL128_MIN_KEY_LEN 16 | |
121 | +#define RIJNDAEL128_MAX_KEY_LEN 32 | |
122 | +#define AES_MIN_KEY_LEN RIJNDAEL128_MIN_KEY_LEN | |
123 | +#define AES_MAX_KEY_LEN RIJNDAEL128_MAX_KEY_LEN | |
124 | +#define CAMELLIA_MIN_KEY_LEN 16 | |
125 | +#define CAMELLIA_MAX_KEY_LEN 32 | |
126 | +#define ARC4_MIN_KEY_LEN 1 | |
127 | +#define ARC4_MAX_KEY_LEN 256 | |
128 | + | |
129 | +/* Max size of data that can be processed */ | |
130 | +#define CRYPTO_MAX_DATA_LEN 64*1024 - 1 | |
131 | + | |
132 | +#define CRYPTO_ALGORITHM_MIN 1 | |
133 | +#define CRYPTO_DES_CBC 1 | |
134 | +#define CRYPTO_3DES_CBC 2 | |
135 | +#define CRYPTO_BLF_CBC 3 | |
136 | +#define CRYPTO_CAST_CBC 4 | |
137 | +#define CRYPTO_SKIPJACK_CBC 5 | |
138 | +#define CRYPTO_MD5_HMAC 6 | |
139 | +#define CRYPTO_SHA1_HMAC 7 | |
140 | +#define CRYPTO_RIPEMD160_HMAC 8 | |
141 | +#define CRYPTO_MD5_KPDK 9 | |
142 | +#define CRYPTO_SHA1_KPDK 10 | |
143 | +#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */ | |
144 | +#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */ | |
145 | +#define CRYPTO_ARC4 12 | |
146 | +#define CRYPTO_MD5 13 | |
147 | +#define CRYPTO_SHA1 14 | |
148 | +#define CRYPTO_NULL_HMAC 15 | |
149 | +#define CRYPTO_NULL_CBC 16 | |
150 | +#define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */ | |
151 | +#define CRYPTO_SHA2_256_HMAC 18 | |
152 | +#define CRYPTO_SHA2_384_HMAC 19 | |
153 | +#define CRYPTO_SHA2_512_HMAC 20 | |
154 | +#define CRYPTO_CAMELLIA_CBC 21 | |
155 | +#define CRYPTO_SHA2_256 22 | |
156 | +#define CRYPTO_SHA2_384 23 | |
157 | +#define CRYPTO_SHA2_512 24 | |
158 | +#define CRYPTO_RIPEMD160 25 | |
159 | +#define CRYPTO_LZS_COMP 26 | |
160 | +#define CRYPTO_ALGORITHM_MAX 26 /* Keep updated - see above */ | |
161 | + | |
162 | +/* Algorithm flags */ | |
163 | +#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */ | |
164 | +#define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */ | |
165 | +#define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */ | |
166 | + | |
167 | +/* | |
168 | + * Crypto driver/device flags. They can be set in the crid | |
169 | + * parameter when creating a session or submitting a key | |
170 | + * op to affect the device/driver assigned. If neither | |
171 | + * of these are specified then the crid is assumed to hold | |
172 | + * the driver id of an existing (and suitable) device that | |
173 | + * must be used to satisfy the request. | |
174 | + */ | |
175 | +#define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */ | |
176 | +#define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */ | |
177 | + | |
178 | +/* NB: deprecated */ | |
179 | +struct session_op { | |
180 | + u_int32_t cipher; /* ie. CRYPTO_DES_CBC */ | |
181 | + u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */ | |
182 | + | |
183 | + u_int32_t keylen; /* cipher key */ | |
184 | + caddr_t key; | |
185 | + int mackeylen; /* mac key */ | |
186 | + caddr_t mackey; | |
187 | + | |
188 | + u_int32_t ses; /* returns: session # */ | |
189 | +}; | |
190 | + | |
191 | +struct session2_op { | |
192 | + u_int32_t cipher; /* ie. CRYPTO_DES_CBC */ | |
193 | + u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */ | |
194 | + | |
195 | + u_int32_t keylen; /* cipher key */ | |
196 | + caddr_t key; | |
197 | + int mackeylen; /* mac key */ | |
198 | + caddr_t mackey; | |
199 | + | |
200 | + u_int32_t ses; /* returns: session # */ | |
201 | + int crid; /* driver id + flags (rw) */ | |
202 | + int pad[4]; /* for future expansion */ | |
203 | +}; | |
204 | + | |
205 | +struct crypt_op { | |
206 | + u_int32_t ses; | |
207 | + u_int16_t op; /* i.e. COP_ENCRYPT */ | |
208 | +#define COP_NONE 0 | |
209 | +#define COP_ENCRYPT 1 | |
210 | +#define COP_DECRYPT 2 | |
211 | + u_int16_t flags; | |
212 | +#define COP_F_BATCH 0x0008 /* Batch op if possible */ | |
213 | + u_int len; | |
214 | + caddr_t src, dst; /* become iov[] inside kernel */ | |
215 | + caddr_t mac; /* must be big enough for chosen MAC */ | |
216 | + caddr_t iv; | |
217 | +}; | |
218 | + | |
219 | +/* | |
220 | + * Parameters for looking up a crypto driver/device by | |
221 | + * device name or by id. The latter are returned for | |
222 | + * created sessions (crid) and completed key operations. | |
223 | + */ | |
224 | +struct crypt_find_op { | |
225 | + int crid; /* driver id + flags */ | |
226 | + char name[32]; /* device/driver name */ | |
227 | +}; | |
228 | + | |
229 | +/* bignum parameter, in packed bytes, ... */ | |
230 | +struct crparam { | |
231 | + caddr_t crp_p; | |
232 | + u_int crp_nbits; | |
233 | +}; | |
234 | + | |
235 | +#define CRK_MAXPARAM 8 | |
236 | + | |
237 | +struct crypt_kop { | |
238 | + u_int crk_op; /* ie. CRK_MOD_EXP or other */ | |
239 | + u_int crk_status; /* return status */ | |
240 | + u_short crk_iparams; /* # of input parameters */ | |
241 | + u_short crk_oparams; /* # of output parameters */ | |
242 | + u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */ | |
243 | + struct crparam crk_param[CRK_MAXPARAM]; | |
244 | +}; | |
245 | +#define CRK_ALGORITM_MIN 0 | |
246 | +#define CRK_MOD_EXP 0 | |
247 | +#define CRK_MOD_EXP_CRT 1 | |
248 | +#define CRK_DSA_SIGN 2 | |
249 | +#define CRK_DSA_VERIFY 3 | |
250 | +#define CRK_DH_COMPUTE_KEY 4 | |
251 | +#define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */ | |
252 | + | |
253 | +#define CRF_MOD_EXP (1 << CRK_MOD_EXP) | |
254 | +#define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT) | |
255 | +#define CRF_DSA_SIGN (1 << CRK_DSA_SIGN) | |
256 | +#define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY) | |
257 | +#define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY) | |
258 | + | |
259 | +/* | |
260 | + * done against open of /dev/crypto, to get a cloned descriptor. | |
261 | + * Please use F_SETFD against the cloned descriptor. | |
262 | + */ | |
263 | +#define CRIOGET _IOWR('c', 100, u_int32_t) | |
264 | +#define CRIOASYMFEAT CIOCASYMFEAT | |
265 | +#define CRIOFINDDEV CIOCFINDDEV | |
266 | + | |
267 | +/* the following are done against the cloned descriptor */ | |
268 | +#define CIOCGSESSION _IOWR('c', 101, struct session_op) | |
269 | +#define CIOCFSESSION _IOW('c', 102, u_int32_t) | |
270 | +#define CIOCCRYPT _IOWR('c', 103, struct crypt_op) | |
271 | +#define CIOCKEY _IOWR('c', 104, struct crypt_kop) | |
272 | +#define CIOCASYMFEAT _IOR('c', 105, u_int32_t) | |
273 | +#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op) | |
274 | +#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop) | |
275 | +#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op) | |
276 | + | |
277 | +struct cryptotstat { | |
278 | + struct timespec acc; /* total accumulated time */ | |
279 | + struct timespec min; /* min time */ | |
280 | + struct timespec max; /* max time */ | |
281 | + u_int32_t count; /* number of observations */ | |
282 | +}; | |
283 | + | |
284 | +struct cryptostats { | |
285 | + u_int32_t cs_ops; /* symmetric crypto ops submitted */ | |
286 | + u_int32_t cs_errs; /* symmetric crypto ops that failed */ | |
287 | + u_int32_t cs_kops; /* asymmetric/key ops submitted */ | |
288 | + u_int32_t cs_kerrs; /* asymmetric/key ops that failed */ | |
289 | + u_int32_t cs_intrs; /* crypto swi thread activations */ | |
290 | + u_int32_t cs_rets; /* crypto return thread activations */ | |
291 | + u_int32_t cs_blocks; /* symmetric op driver block */ | |
292 | + u_int32_t cs_kblocks; /* asymmetric/key op driver block */ | |
293 | + /* | |
294 | + * When CRYPTO_TIMING is defined at compile time and the | |
295 | + * sysctl debug.crypto is set to 1, the crypto system will | |
296 | + * accumulate statistics about how long it takes to process | |
297 | + * crypto requests at various points during processing. | |
298 | + */ | |
299 | + struct cryptotstat cs_invoke; /* crypto_dispatch -> crypto_invoke */ | |
300 | + struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */ | |
301 | + struct cryptotstat cs_cb; /* crypto_done -> callback */ | |
302 | + struct cryptotstat cs_finis; /* callback -> callback return */ | |
303 | + | |
304 | + u_int32_t cs_drops; /* crypto ops dropped due to congestion */ | |
305 | +}; | |
306 | + | |
307 | +#ifdef __KERNEL__ | |
308 | + | |
309 | +/* Standard initialization structure beginning */ | |
310 | +struct cryptoini { | |
311 | + int cri_alg; /* Algorithm to use */ | |
312 | + int cri_klen; /* Key length, in bits */ | |
313 | + int cri_mlen; /* Number of bytes we want from the | |
314 | + entire hash. 0 means all. */ | |
315 | + caddr_t cri_key; /* key to use */ | |
316 | + u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */ | |
317 | + struct cryptoini *cri_next; | |
318 | +}; | |
319 | + | |
320 | +/* Describe boundaries of a single crypto operation */ | |
321 | +struct cryptodesc { | |
322 | + int crd_skip; /* How many bytes to ignore from start */ | |
323 | + int crd_len; /* How many bytes to process */ | |
324 | + int crd_inject; /* Where to inject results, if applicable */ | |
325 | + int crd_flags; | |
326 | + | |
327 | +#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */ | |
328 | +#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in | |
329 | + place, so don't copy. */ | |
330 | +#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */ | |
331 | +#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */ | |
332 | +#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */ | |
333 | +#define CRD_F_COMP 0x0f /* Set when doing compression */ | |
334 | + | |
335 | + struct cryptoini CRD_INI; /* Initialization/context data */ | |
336 | +#define crd_iv CRD_INI.cri_iv | |
337 | +#define crd_key CRD_INI.cri_key | |
338 | +#define crd_alg CRD_INI.cri_alg | |
339 | +#define crd_klen CRD_INI.cri_klen | |
340 | +#define crd_mlen CRD_INI.cri_mlen | |
341 | + | |
342 | + struct cryptodesc *crd_next; | |
343 | +}; | |
344 | + | |
345 | +/* Structure describing complete operation */ | |
346 | +struct cryptop { | |
347 | + struct list_head crp_next; | |
348 | + wait_queue_head_t crp_waitq; | |
349 | + | |
350 | + u_int64_t crp_sid; /* Session ID */ | |
351 | + int crp_ilen; /* Input data total length */ | |
352 | + int crp_olen; /* Result total length */ | |
353 | + | |
354 | + int crp_etype; /* | |
355 | + * Error type (zero means no error). | |
356 | + * All error codes except EAGAIN | |
357 | + * indicate possible data corruption (as in, | |
358 | + * the data have been touched). On all | |
359 | + * errors, the crp_sid may have changed | |
360 | + * (reset to a new one), so the caller | |
361 | + * should always check and use the new | |
362 | + * value on future requests. | |
363 | + */ | |
364 | + int crp_flags; | |
365 | + | |
366 | +#define CRYPTO_F_SKBUF 0x0001 /* Input/output are skbuf chains */ | |
367 | +#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */ | |
368 | +#define CRYPTO_F_REL 0x0004 /* Must return data in same place */ | |
369 | +#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */ | |
370 | +#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */ | |
371 | +#define CRYPTO_F_DONE 0x0020 /* Operation completed */ | |
372 | +#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */ | |
373 | + | |
374 | + caddr_t crp_buf; /* Data to be processed */ | |
375 | + caddr_t crp_opaque; /* Opaque pointer, passed along */ | |
376 | + struct cryptodesc *crp_desc; /* Linked list of processing descriptors */ | |
377 | + | |
378 | + int (*crp_callback)(struct cryptop *); /* Callback function */ | |
379 | +}; | |
380 | + | |
381 | +#define CRYPTO_BUF_CONTIG 0x0 | |
382 | +#define CRYPTO_BUF_IOV 0x1 | |
383 | +#define CRYPTO_BUF_SKBUF 0x2 | |
384 | + | |
385 | +#define CRYPTO_OP_DECRYPT 0x0 | |
386 | +#define CRYPTO_OP_ENCRYPT 0x1 | |
387 | + | |
388 | +/* | |
389 | + * Hints passed to process methods. | |
390 | + */ | |
391 | +#define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */ | |
392 | + | |
393 | +struct cryptkop { | |
394 | + struct list_head krp_next; | |
395 | + wait_queue_head_t krp_waitq; | |
396 | + | |
397 | + int krp_flags; | |
398 | +#define CRYPTO_KF_DONE 0x0001 /* Operation completed */ | |
399 | +#define CRYPTO_KF_CBIMM 0x0002 /* Do callback immediately */ | |
400 | + | |
401 | + u_int krp_op; /* ie. CRK_MOD_EXP or other */ | |
402 | + u_int krp_status; /* return status */ | |
403 | + u_short krp_iparams; /* # of input parameters */ | |
404 | + u_short krp_oparams; /* # of output parameters */ | |
405 | + u_int krp_crid; /* desired device, etc. */ | |
406 | + u_int32_t krp_hid; | |
407 | + struct crparam krp_param[CRK_MAXPARAM]; /* kvm */ | |
408 | + int (*krp_callback)(struct cryptkop *); | |
409 | +}; | |
410 | + | |
411 | +#include <ocf-compat.h> | |
412 | + | |
413 | +/* | |
414 | + * Session ids are 64 bits. The lower 32 bits contain a "local id" which | |
415 | + * is a driver-private session identifier. The upper 32 bits contain a | |
416 | + * "hardware id" used by the core crypto code to identify the driver and | |
417 | + * a copy of the driver's capabilities that can be used by client code to | |
418 | + * optimize operation. | |
419 | + */ | |
420 | +#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff) | |
421 | +#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000) | |
422 | +#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff) | |
423 | + | |
424 | +extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard); | |
425 | +extern int crypto_freesession(u_int64_t sid); | |
426 | +#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE | |
427 | +#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE | |
428 | +#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */ | |
429 | +extern int32_t crypto_get_driverid(device_t dev, int flags); | |
430 | +extern int crypto_find_driver(const char *); | |
431 | +extern device_t crypto_find_device_byhid(int hid); | |
432 | +extern int crypto_getcaps(int hid); | |
433 | +extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, | |
434 | + u_int32_t flags); | |
435 | +extern int crypto_kregister(u_int32_t, int, u_int32_t); | |
436 | +extern int crypto_unregister(u_int32_t driverid, int alg); | |
437 | +extern int crypto_unregister_all(u_int32_t driverid); | |
438 | +extern int crypto_dispatch(struct cryptop *crp); | |
439 | +extern int crypto_kdispatch(struct cryptkop *); | |
440 | +#define CRYPTO_SYMQ 0x1 | |
441 | +#define CRYPTO_ASYMQ 0x2 | |
442 | +extern int crypto_unblock(u_int32_t, int); | |
443 | +extern void crypto_done(struct cryptop *crp); | |
444 | +extern void crypto_kdone(struct cryptkop *); | |
445 | +extern int crypto_getfeat(int *); | |
446 | + | |
447 | +extern void crypto_freereq(struct cryptop *crp); | |
448 | +extern struct cryptop *crypto_getreq(int num); | |
449 | + | |
450 | +extern int crypto_usercrypto; /* userland may do crypto requests */ | |
451 | +extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */ | |
452 | +extern int crypto_devallowsoft; /* only use hardware crypto */ | |
453 | + | |
454 | +/* | |
455 | + * random number support, crypto_unregister_all will unregister | |
456 | + */ | |
457 | +extern int crypto_rregister(u_int32_t driverid, | |
458 | + int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg); | |
459 | +extern int crypto_runregister_all(u_int32_t driverid); | |
460 | + | |
461 | +/* | |
462 | + * Crypto-related utility routines used mainly by drivers. | |
463 | + * | |
464 | + * XXX these don't really belong here; but for now they're | |
465 | + * kept apart from the rest of the system. | |
466 | + */ | |
467 | +struct uio; | |
468 | +extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp); | |
469 | +extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp); | |
470 | +extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off); | |
471 | + | |
472 | +extern void crypto_copyback(int flags, caddr_t buf, int off, int size, | |
473 | + caddr_t in); | |
474 | +extern void crypto_copydata(int flags, caddr_t buf, int off, int size, | |
475 | + caddr_t out); | |
476 | +extern int crypto_apply(int flags, caddr_t buf, int off, int len, | |
477 | + int (*f)(void *, void *, u_int), void *arg); | |
478 | + | |
479 | +#endif /* __KERNEL__ */ | |
480 | +#endif /* _CRYPTO_CRYPTO_H_ */ |
crypto/ocf/cryptosoft.c
Changes suppressed. Click to show
1 | +/* | |
2 | + * An OCF module that uses the linux kernel cryptoapi, based on the | |
3 | + * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu) | |
4 | + * but is mostly unrecognisable, | |
5 | + * | |
6 | + * Written by David McCullough <david_mccullough@mcafee.com> | |
7 | + * Copyright (C) 2004-2011 David McCullough | |
8 | + * Copyright (C) 2004-2005 Intel Corporation. | |
9 | + * | |
10 | + * LICENSE TERMS | |
11 | + * | |
12 | + * The free distribution and use of this software in both source and binary | |
13 | + * form is allowed (with or without changes) provided that: | |
14 | + * | |
15 | + * 1. distributions of this source code include the above copyright | |
16 | + * notice, this list of conditions and the following disclaimer; | |
17 | + * | |
18 | + * 2. distributions in binary form include the above copyright | |
19 | + * notice, this list of conditions and the following disclaimer | |
20 | + * in the documentation and/or other associated materials; | |
21 | + * | |
22 | + * 3. the copyright holder's name is not used to endorse products | |
23 | + * built using this software without specific written permission. | |
24 | + * | |
25 | + * ALTERNATIVELY, provided that this notice is retained in full, this product | |
26 | + * may be distributed under the terms of the GNU General Public License (GPL), | |
27 | + * in which case the provisions of the GPL apply INSTEAD OF those given above. | |
28 | + * | |
29 | + * DISCLAIMER | |
30 | + * | |
31 | + * This software is provided 'as is' with no explicit or implied warranties | |
32 | + * in respect of its properties, including, but not limited to, correctness | |
33 | + * and/or fitness for purpose. | |
34 | + * --------------------------------------------------------------------------- | |
35 | + */ | |
36 | + | |
37 | +#include <linux/version.h> | |
38 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
39 | +#include <linux/config.h> | |
40 | +#endif | |
41 | +#include <linux/module.h> | |
42 | +#include <linux/init.h> | |
43 | +#include <linux/list.h> | |
44 | +#include <linux/slab.h> | |
45 | +#include <linux/sched.h> | |
46 | +#include <linux/wait.h> | |
47 | +#include <linux/crypto.h> | |
48 | +#include <linux/mm.h> | |
49 | +#include <linux/skbuff.h> | |
50 | +#include <linux/random.h> | |
51 | +#include <linux/interrupt.h> | |
52 | +#include <linux/spinlock.h> | |
53 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) | |
54 | +#include <linux/scatterlist.h> | |
55 | +#endif | |
56 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) | |
57 | +#include <crypto/hash.h> | |
58 | +#endif | |
59 | + | |
60 | +#include <cryptodev.h> | |
61 | +#include <uio.h> | |
62 | + | |
63 | +struct { | |
64 | + softc_device_decl sc_dev; | |
65 | +} swcr_softc; | |
66 | + | |
67 | +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) | |
68 | + | |
69 | +#define SW_TYPE_CIPHER 0x01 | |
70 | +#define SW_TYPE_HMAC 0x02 | |
71 | +#define SW_TYPE_HASH 0x04 | |
72 | +#define SW_TYPE_COMP 0x08 | |
73 | +#define SW_TYPE_BLKCIPHER 0x10 | |
74 | +#define SW_TYPE_ALG_MASK 0x1f | |
75 | + | |
76 | +#define SW_TYPE_ASYNC 0x8000 | |
77 | + | |
78 | +#define SW_TYPE_INUSE 0x10000000 | |
79 | + | |
80 | +/* We change some of the above if we have an async interface */ | |
81 | + | |
82 | +#define SW_TYPE_ALG_AMASK (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC) | |
83 | + | |
84 | +#define SW_TYPE_ABLKCIPHER (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC) | |
85 | +#define SW_TYPE_AHASH (SW_TYPE_HASH | SW_TYPE_ASYNC) | |
86 | +#define SW_TYPE_AHMAC (SW_TYPE_HMAC | SW_TYPE_ASYNC) | |
87 | + | |
88 | +#define SCATTERLIST_MAX 16 | |
89 | + | |
90 | +struct swcr_data { | |
91 | + struct work_struct workq; | |
92 | + int sw_type; | |
93 | + int sw_alg; | |
94 | + struct crypto_tfm *sw_tfm; | |
95 | + spinlock_t sw_tfm_lock; | |
96 | + union { | |
97 | + struct { | |
98 | + char *sw_key; | |
99 | + int sw_klen; | |
100 | + int sw_mlen; | |
101 | + } hmac; | |
102 | + void *sw_comp_buf; | |
103 | + } u; | |
104 | + struct swcr_data *sw_next; | |
105 | +}; | |
106 | + | |
107 | +struct swcr_req { | |
108 | + struct swcr_data *sw_head; | |
109 | + struct swcr_data *sw; | |
110 | + struct cryptop *crp; | |
111 | + struct cryptodesc *crd; | |
112 | + struct scatterlist sg[SCATTERLIST_MAX]; | |
113 | + unsigned char iv[EALG_MAX_BLOCK_LEN]; | |
114 | + char result[HASH_MAX_LEN]; | |
115 | + void *crypto_req; | |
116 | +}; | |
117 | + | |
118 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) | |
119 | +static kmem_cache_t *swcr_req_cache; | |
120 | +#else | |
121 | +static struct kmem_cache *swcr_req_cache; | |
122 | +#endif | |
123 | + | |
124 | +#ifndef CRYPTO_TFM_MODE_CBC | |
125 | +/* | |
126 | + * As of linux-2.6.21 this is no longer defined, and presumably no longer | |
127 | + * needed to be passed into the crypto core code. | |
128 | + */ | |
129 | +#define CRYPTO_TFM_MODE_CBC 0 | |
130 | +#define CRYPTO_TFM_MODE_ECB 0 | |
131 | +#endif | |
132 | + | |
133 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) | |
134 | + /* | |
135 | + * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new | |
136 | + * API into old API. | |
137 | + */ | |
138 | + | |
139 | + /* Symmetric/Block Cipher */ | |
140 | + struct blkcipher_desc | |
141 | + { | |
142 | + struct crypto_tfm *tfm; | |
143 | + void *info; | |
144 | + }; | |
145 | + #define ecb(X) #X , CRYPTO_TFM_MODE_ECB | |
146 | + #define cbc(X) #X , CRYPTO_TFM_MODE_CBC | |
147 | + #define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0) | |
148 | + #define crypto_blkcipher_cast(X) X | |
149 | + #define crypto_blkcipher_tfm(X) X | |
150 | + #define crypto_alloc_blkcipher(X, Y, Z) crypto_alloc_tfm(X, mode) | |
151 | + #define crypto_blkcipher_ivsize(X) crypto_tfm_alg_ivsize(X) | |
152 | + #define crypto_blkcipher_blocksize(X) crypto_tfm_alg_blocksize(X) | |
153 | + #define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z) | |
154 | + #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \ | |
155 | + crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info)) | |
156 | + #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \ | |
157 | + crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info)) | |
158 | + #define crypto_blkcipher_set_flags(x, y) /* nop */ | |
159 | + #define crypto_free_blkcipher(x) crypto_free_tfm(x) | |
160 | + #define crypto_free_comp crypto_free_tfm | |
161 | + #define crypto_free_hash crypto_free_tfm | |
162 | + | |
163 | + /* Hash/HMAC/Digest */ | |
164 | + struct hash_desc | |
165 | + { | |
166 | + struct crypto_tfm *tfm; | |
167 | + }; | |
168 | + #define hmac(X) #X , 0 | |
169 | + #define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0) | |
170 | + #define crypto_hash_cast(X) X | |
171 | + #define crypto_hash_tfm(X) X | |
172 | + #define crypto_alloc_hash(X, Y, Z) crypto_alloc_tfm(X, mode) | |
173 | + #define crypto_hash_digestsize(X) crypto_tfm_alg_digestsize(X) | |
174 | + #define crypto_hash_digest(W, X, Y, Z) \ | |
175 | + crypto_digest_digest((W)->tfm, X, sg_num, Z) | |
176 | + | |
177 | + /* Asymmetric Cipher */ | |
178 | + #define crypto_has_cipher(X, Y, Z) crypto_alg_available(X, 0) | |
179 | + | |
180 | + /* Compression */ | |
181 | + #define crypto_has_comp(X, Y, Z) crypto_alg_available(X, 0) | |
182 | + #define crypto_comp_tfm(X) X | |
183 | + #define crypto_comp_cast(X) X | |
184 | + #define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode) | |
185 | + #define plain(X) #X , 0 | |
186 | +#else | |
187 | + #define ecb(X) "ecb(" #X ")" , 0 | |
188 | + #define cbc(X) "cbc(" #X ")" , 0 | |
189 | + #define hmac(X) "hmac(" #X ")" , 0 | |
190 | + #define plain(X) #X , 0 | |
191 | +#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */ | |
192 | + | |
193 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) | |
194 | +/* no ablkcipher in older kernels */ | |
195 | +#define crypto_alloc_ablkcipher(a,b,c) (NULL) | |
196 | +#define crypto_ablkcipher_tfm(x) ((struct crypto_tfm *)(x)) | |
197 | +#define crypto_ablkcipher_set_flags(a, b) /* nop */ | |
198 | +#define crypto_ablkcipher_setkey(x, y, z) (-EINVAL) | |
199 | +#define crypto_has_ablkcipher(a,b,c) (0) | |
200 | +#else | |
201 | +#define HAVE_ABLKCIPHER | |
202 | +#endif | |
203 | + | |
204 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) | |
205 | +/* no ahash in older kernels */ | |
206 | +#define crypto_ahash_tfm(x) ((struct crypto_tfm *)(x)) | |
207 | +#define crypto_alloc_ahash(a,b,c) (NULL) | |
208 | +#define crypto_ahash_digestsize(x) 0 | |
209 | +#else | |
210 | +#define HAVE_AHASH | |
211 | +#endif | |
212 | + | |
213 | +struct crypto_details { | |
214 | + char *alg_name; | |
215 | + int mode; | |
216 | + int sw_type; | |
217 | +}; | |
218 | + | |
219 | +static struct crypto_details crypto_details[] = { | |
220 | + [CRYPTO_DES_CBC] = { cbc(des), SW_TYPE_BLKCIPHER, }, | |
221 | + [CRYPTO_3DES_CBC] = { cbc(des3_ede), SW_TYPE_BLKCIPHER, }, | |
222 | + [CRYPTO_BLF_CBC] = { cbc(blowfish), SW_TYPE_BLKCIPHER, }, | |
223 | + [CRYPTO_CAST_CBC] = { cbc(cast5), SW_TYPE_BLKCIPHER, }, | |
224 | + [CRYPTO_SKIPJACK_CBC] = { cbc(skipjack), SW_TYPE_BLKCIPHER, }, | |
225 | + [CRYPTO_MD5_HMAC] = { hmac(md5), SW_TYPE_HMAC, }, | |
226 | + [CRYPTO_SHA1_HMAC] = { hmac(sha1), SW_TYPE_HMAC, }, | |
227 | + [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160), SW_TYPE_HMAC, }, | |
228 | + [CRYPTO_MD5_KPDK] = { plain(md5-kpdk), SW_TYPE_HASH, }, | |
229 | + [CRYPTO_SHA1_KPDK] = { plain(sha1-kpdk), SW_TYPE_HASH, }, | |
230 | + [CRYPTO_AES_CBC] = { cbc(aes), SW_TYPE_BLKCIPHER, }, | |
231 | + [CRYPTO_ARC4] = { ecb(arc4), SW_TYPE_BLKCIPHER, }, | |
232 | + [CRYPTO_MD5] = { plain(md5), SW_TYPE_HASH, }, | |
233 | + [CRYPTO_SHA1] = { plain(sha1), SW_TYPE_HASH, }, | |
234 | + [CRYPTO_NULL_HMAC] = { hmac(digest_null), SW_TYPE_HMAC, }, | |
235 | + [CRYPTO_NULL_CBC] = { cbc(cipher_null), SW_TYPE_BLKCIPHER, }, | |
236 | + [CRYPTO_DEFLATE_COMP] = { plain(deflate), SW_TYPE_COMP, }, | |
237 | + [CRYPTO_SHA2_256_HMAC] = { hmac(sha256), SW_TYPE_HMAC, }, | |
238 | + [CRYPTO_SHA2_384_HMAC] = { hmac(sha384), SW_TYPE_HMAC, }, | |
239 | + [CRYPTO_SHA2_512_HMAC] = { hmac(sha512), SW_TYPE_HMAC, }, | |
240 | + [CRYPTO_CAMELLIA_CBC] = { cbc(camellia), SW_TYPE_BLKCIPHER, }, | |
241 | + [CRYPTO_SHA2_256] = { plain(sha256), SW_TYPE_HASH, }, | |
242 | + [CRYPTO_SHA2_384] = { plain(sha384), SW_TYPE_HASH, }, | |
243 | + [CRYPTO_SHA2_512] = { plain(sha512), SW_TYPE_HASH, }, | |
244 | + [CRYPTO_RIPEMD160] = { plain(ripemd160), SW_TYPE_HASH, }, | |
245 | +}; | |
246 | + | |
247 | +int32_t swcr_id = -1; | |
248 | +module_param(swcr_id, int, 0444); | |
249 | +MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver"); | |
250 | + | |
251 | +int swcr_fail_if_compression_grows = 1; | |
252 | +module_param(swcr_fail_if_compression_grows, int, 0644); | |
253 | +MODULE_PARM_DESC(swcr_fail_if_compression_grows, | |
254 | + "Treat compression that results in more data as a failure"); | |
255 | + | |
256 | +int swcr_no_ahash = 0; | |
257 | +module_param(swcr_no_ahash, int, 0644); | |
258 | +MODULE_PARM_DESC(swcr_no_ahash, | |
259 | + "Do not use async hash/hmac even if available"); | |
260 | + | |
261 | +int swcr_no_ablk = 0; | |
262 | +module_param(swcr_no_ablk, int, 0644); | |
263 | +MODULE_PARM_DESC(swcr_no_ablk, | |
264 | + "Do not use async blk ciphers even if available"); | |
265 | + | |
266 | +static struct swcr_data **swcr_sessions = NULL; | |
267 | +static u_int32_t swcr_sesnum = 0; | |
268 | + | |
269 | +static int swcr_process(device_t, struct cryptop *, int); | |
270 | +static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *); | |
271 | +static int swcr_freesession(device_t, u_int64_t); | |
272 | + | |
273 | +static device_method_t swcr_methods = { | |
274 | + /* crypto device methods */ | |
275 | + DEVMETHOD(cryptodev_newsession, swcr_newsession), | |
276 | + DEVMETHOD(cryptodev_freesession,swcr_freesession), | |
277 | + DEVMETHOD(cryptodev_process, swcr_process), | |
278 | +}; | |
279 | + | |
280 | +#define debug swcr_debug | |
281 | +int swcr_debug = 0; | |
282 | +module_param(swcr_debug, int, 0644); | |
283 | +MODULE_PARM_DESC(swcr_debug, "Enable debug"); | |
284 | + | |
285 | +static void swcr_process_req(struct swcr_req *req); | |
286 | + | |
287 | +/* | |
289 | + * Some things just need to be run with user context -- the kernel | |
290 | + * compression libs use vmalloc/vfree, for example. | |
290 | + */ | |
291 | + | |
292 | +typedef struct { | |
293 | + struct work_struct wq; | |
294 | + void (*func)(void *arg); | |
295 | + void *arg; | |
296 | +} execute_later_t; | |
297 | + | |
298 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) | |
299 | +static void | |
300 | +doing_it_now(struct work_struct *wq) | |
301 | +{ | |
302 | + execute_later_t *w = container_of(wq, execute_later_t, wq); | |
303 | + (w->func)(w->arg); | |
304 | + kfree(w); | |
305 | +} | |
306 | +#else | |
307 | +static void | |
308 | +doing_it_now(void *arg) | |
309 | +{ | |
310 | + execute_later_t *w = (execute_later_t *) arg; | |
311 | + (w->func)(w->arg); | |
312 | + kfree(w); | |
313 | +} | |
314 | +#endif | |
315 | + | |
316 | +static void | |
317 | +execute_later(void (fn)(void *), void *arg) | |
318 | +{ | |
319 | + execute_later_t *w; | |
320 | + | |
321 | + w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC); | |
322 | + if (w) { | |
323 | + memset(w, '\0', sizeof(w)); | |
324 | + w->func = fn; | |
325 | + w->arg = arg; | |
326 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) | |
327 | + INIT_WORK(&w->wq, doing_it_now); | |
328 | +#else | |
329 | + INIT_WORK(&w->wq, doing_it_now, w); | |
330 | +#endif | |
331 | + schedule_work(&w->wq); | |
332 | + } | |
333 | +} | |
334 | + | |
335 | +/* | |
336 | + * Generate a new software session. | |
337 | + */ | |
338 | +static int | |
339 | +swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri) | |
340 | +{ | |
341 | + struct swcr_data **swd; | |
342 | + u_int32_t i; | |
343 | + int error; | |
344 | + char *algo; | |
345 | + int mode; | |
346 | + | |
347 | + dprintk("%s()\n", __FUNCTION__); | |
348 | + if (sid == NULL || cri == NULL) { | |
349 | + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__); | |
350 | + return EINVAL; | |
351 | + } | |
352 | + | |
353 | + if (swcr_sessions) { | |
354 | + for (i = 1; i < swcr_sesnum; i++) | |
355 | + if (swcr_sessions[i] == NULL) | |
356 | + break; | |
357 | + } else | |
358 | + i = 1; /* NB: to silence compiler warning */ | |
359 | + | |
360 | + if (swcr_sessions == NULL || i == swcr_sesnum) { | |
361 | + if (swcr_sessions == NULL) { | |
362 | + i = 1; /* We leave swcr_sessions[0] empty */ | |
363 | + swcr_sesnum = CRYPTO_SW_SESSIONS; | |
364 | + } else | |
365 | + swcr_sesnum *= 2; | |
366 | + | |
367 | + swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC); | |
368 | + if (swd == NULL) { | |
369 | + /* Reset session number */ | |
370 | + if (swcr_sesnum == CRYPTO_SW_SESSIONS) | |
371 | + swcr_sesnum = 0; | |
372 | + else | |
373 | + swcr_sesnum /= 2; | |
374 | + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__); | |
375 | + return ENOBUFS; | |
376 | + } | |
377 | + memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *)); | |
378 | + | |
379 | + /* Copy existing sessions */ | |
380 | + if (swcr_sessions) { | |
381 | + memcpy(swd, swcr_sessions, | |
382 | + (swcr_sesnum / 2) * sizeof(struct swcr_data *)); | |
383 | + kfree(swcr_sessions); | |
384 | + } | |
385 | + | |
386 | + swcr_sessions = swd; | |
387 | + } | |
388 | + | |
389 | + swd = &swcr_sessions[i]; | |
390 | + *sid = i; | |
391 | + | |
392 | + while (cri) { | |
393 | + *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data), | |
394 | + SLAB_ATOMIC); | |
395 | + if (*swd == NULL) { | |
396 | + swcr_freesession(NULL, i); | |
397 | + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__); | |
398 | + return ENOBUFS; | |
399 | + } | |
400 | + memset(*swd, 0, sizeof(struct swcr_data)); | |
401 | + | |
402 | + if (cri->cri_alg < 0 || | |
403 | + cri->cri_alg>=sizeof(crypto_details)/sizeof(crypto_details[0])){ | |
404 | + printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg); | |
405 | + swcr_freesession(NULL, i); | |
406 | + return EINVAL; | |
407 | + } | |
408 | + | |
409 | + algo = crypto_details[cri->cri_alg].alg_name; | |
410 | + if (!algo || !*algo) { | |
411 | + printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg); | |
412 | + swcr_freesession(NULL, i); | |
413 | + return EINVAL; | |
414 | + } | |
415 | + | |
416 | + mode = crypto_details[cri->cri_alg].mode; | |
417 | + (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type; | |
418 | + (*swd)->sw_alg = cri->cri_alg; | |
419 | + | |
420 | + spin_lock_init(&(*swd)->sw_tfm_lock); | |
421 | + | |
422 | + /* Algorithm specific configuration */ | |
423 | + switch (cri->cri_alg) { | |
424 | + case CRYPTO_NULL_CBC: | |
425 | + cri->cri_klen = 0; /* make it work with crypto API */ | |
426 | + break; | |
427 | + default: | |
428 | + break; | |
429 | + } | |
430 | + | |
431 | + if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) { | |
432 | + dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__, | |
433 | + algo, mode); | |
434 | + | |
435 | + /* try async first */ | |
436 | + (*swd)->sw_tfm = swcr_no_ablk ? NULL : | |
437 | + crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0)); | |
438 | + if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) { | |
439 | + dprintk("%s %s cipher is async\n", __FUNCTION__, algo); | |
440 | + (*swd)->sw_type |= SW_TYPE_ASYNC; | |
441 | + } else { | |
442 | + (*swd)->sw_tfm = crypto_blkcipher_tfm( | |
443 | + crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC)); | |
444 | + if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) | |
445 | + dprintk("%s %s cipher is sync\n", __FUNCTION__, algo); | |
446 | + } | |
447 | + if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) { | |
448 | + int err; | |
449 | + dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n", | |
450 | + algo,mode); | |
451 | + err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL; | |
452 | + (*swd)->sw_tfm = NULL; /* ensure NULL */ | |
453 | + swcr_freesession(NULL, i); | |
454 | + return err; | |
455 | + } | |
456 | + | |
457 | + if (debug) { | |
458 | + dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d", | |
459 | + __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8); | |
460 | + for (i = 0; i < (cri->cri_klen + 7) / 8; i++) | |
461 | + dprintk("%s0x%x", (i % 8) ? " " : "\n ", | |
462 | + cri->cri_key[i] & 0xff); | |
463 | + dprintk("\n"); | |
464 | + } | |
465 | + if ((*swd)->sw_type & SW_TYPE_ASYNC) { | |
466 | + /* OCF doesn't enforce keys */ | |
467 | + crypto_ablkcipher_set_flags( | |
468 | + __crypto_ablkcipher_cast((*swd)->sw_tfm), | |
469 | + CRYPTO_TFM_REQ_WEAK_KEY); | |
470 | + error = crypto_ablkcipher_setkey( | |
471 | + __crypto_ablkcipher_cast((*swd)->sw_tfm), | |
472 | + cri->cri_key, (cri->cri_klen + 7) / 8); | |
473 | + } else { | |
474 | + /* OCF doesn't enforce keys */ | |
475 | + crypto_blkcipher_set_flags( | |
476 | + crypto_blkcipher_cast((*swd)->sw_tfm), | |
477 | + CRYPTO_TFM_REQ_WEAK_KEY); | |
478 | + error = crypto_blkcipher_setkey( | |
479 | + crypto_blkcipher_cast((*swd)->sw_tfm), | |
480 | + cri->cri_key, (cri->cri_klen + 7) / 8); | |
481 | + } | |
482 | + if (error) { | |
483 | + printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error, | |
484 | + (*swd)->sw_tfm->crt_flags); | |
485 | + swcr_freesession(NULL, i); | |
486 | + return error; | |
487 | + } | |
488 | + } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) { | |
489 | + dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__, | |
490 | + algo, mode); | |
491 | + | |
492 | + /* try async first */ | |
493 | + (*swd)->sw_tfm = swcr_no_ahash ? NULL : | |
494 | + crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0)); | |
495 | + if ((*swd)->sw_tfm) { | |
496 | + dprintk("%s %s hash is async\n", __FUNCTION__, algo); | |
497 | + (*swd)->sw_type |= SW_TYPE_ASYNC; | |
498 | + } else { | |
499 | + dprintk("%s %s hash is sync\n", __FUNCTION__, algo); | |
500 | + (*swd)->sw_tfm = crypto_hash_tfm( | |
501 | + crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC)); | |
502 | + } | |
503 | + | |
504 | + if (!(*swd)->sw_tfm) { | |
505 | + dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n", | |
506 | + algo, mode); | |
507 | + swcr_freesession(NULL, i); | |
508 | + return EINVAL; | |
509 | + } | |
510 | + | |
511 | + (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8; | |
512 | + (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen, | |
513 | + SLAB_ATOMIC); | |
514 | + if ((*swd)->u.hmac.sw_key == NULL) { | |
515 | + swcr_freesession(NULL, i); | |
516 | + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__); | |
517 | + return ENOBUFS; | |
518 | + } | |
519 | + memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen); | |
520 | + if (cri->cri_mlen) { | |
521 | + (*swd)->u.hmac.sw_mlen = cri->cri_mlen; | |
522 | + } else if ((*swd)->sw_type & SW_TYPE_ASYNC) { | |
523 | + (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize( | |
524 | + __crypto_ahash_cast((*swd)->sw_tfm)); | |
525 | + } else { | |
526 | + (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize( | |
527 | + crypto_hash_cast((*swd)->sw_tfm)); | |
528 | + } | |
529 | + } else if ((*swd)->sw_type & SW_TYPE_COMP) { | |
530 | + (*swd)->sw_tfm = crypto_comp_tfm( | |
531 | + crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC)); | |
532 | + if (!(*swd)->sw_tfm) { | |
533 | + dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n", | |
534 | + algo, mode); | |
535 | + swcr_freesession(NULL, i); | |
536 | + return EINVAL; | |
537 | + } | |
538 | + (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC); | |
539 | + if ((*swd)->u.sw_comp_buf == NULL) { | |
540 | + swcr_freesession(NULL, i); | |
541 | + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__); | |
542 | + return ENOBUFS; | |
543 | + } | |
544 | + } else { | |
545 | + printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type); | |
546 | + swcr_freesession(NULL, i); | |
547 | + return EINVAL; | |
548 | + } | |
549 | + | |
550 | + cri = cri->cri_next; | |
551 | + swd = &((*swd)->sw_next); | |
552 | + } | |
553 | + return 0; | |
554 | +} | |
555 | + | |
556 | +/* | |
557 | + * Free a session. | |
558 | + */ | |
559 | +static int | |
560 | +swcr_freesession(device_t dev, u_int64_t tid) | |
561 | +{ | |
562 | + struct swcr_data *swd; | |
563 | + u_int32_t sid = CRYPTO_SESID2LID(tid); | |
564 | + | |
565 | + dprintk("%s()\n", __FUNCTION__); | |
566 | + if (sid > swcr_sesnum || swcr_sessions == NULL || | |
567 | + swcr_sessions[sid] == NULL) { | |
568 | + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); | |
569 | + return(EINVAL); | |
570 | + } | |
571 | + | |
572 | + /* Silently accept and return */ | |
573 | + if (sid == 0) | |
574 | + return(0); | |
575 | + | |
576 | + while ((swd = swcr_sessions[sid]) != NULL) { | |
577 | + swcr_sessions[sid] = swd->sw_next; | |
578 | + if (swd->sw_tfm) { | |
579 | + switch (swd->sw_type & SW_TYPE_ALG_AMASK) { | |
580 | +#ifdef HAVE_AHASH | |
581 | + case SW_TYPE_AHMAC: | |
582 | + case SW_TYPE_AHASH: | |
583 | + crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm)); | |
584 | + break; | |
585 | +#endif | |
586 | +#ifdef HAVE_ABLKCIPHER | |
587 | + case SW_TYPE_ABLKCIPHER: | |
588 | + crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm)); | |
589 | + break; | |
590 | +#endif | |
591 | + case SW_TYPE_BLKCIPHER: | |
592 | + crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm)); | |
593 | + break; | |
594 | + case SW_TYPE_HMAC: | |
595 | + case SW_TYPE_HASH: | |
596 | + crypto_free_hash(crypto_hash_cast(swd->sw_tfm)); | |
597 | + break; | |
598 | + case SW_TYPE_COMP: | |
599 | + if (in_interrupt()) | |
600 | + execute_later((void (*)(void *))crypto_free_comp, (void *)crypto_comp_cast(swd->sw_tfm)); | |
601 | + else | |
602 | + crypto_free_comp(crypto_comp_cast(swd->sw_tfm)); | |
603 | + break; | |
604 | + default: | |
605 | + crypto_free_tfm(swd->sw_tfm); | |
606 | + break; | |
607 | + } | |
608 | + swd->sw_tfm = NULL; | |
609 | + } | |
610 | + if (swd->sw_type & SW_TYPE_COMP) { | |
611 | + if (swd->u.sw_comp_buf) | |
612 | + kfree(swd->u.sw_comp_buf); | |
613 | + } else { | |
614 | + if (swd->u.hmac.sw_key) | |
615 | + kfree(swd->u.hmac.sw_key); | |
616 | + } | |
617 | + kfree(swd); | |
618 | + } | |
619 | + return 0; | |
620 | +} | |
621 | + | |
/*
 * Finish (or continue) processing of a request: release the per-tfm
 * in-use marker, collect/free any async sub-request, then either start
 * the next crypto descriptor in the chain or hand the completed request
 * back to OCF via crypto_done().
 */
static void swcr_process_req_complete(struct swcr_req *req)
{
	dprintk("%s()\n", __FUNCTION__);

	/* release the single-user marker taken in swcr_process_req() */
	if (req->sw->sw_type & SW_TYPE_INUSE) {
		unsigned long flags;
		spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
		req->sw->sw_type &= ~SW_TYPE_INUSE;
		spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
	}

	/* an earlier stage flagged an error: complete immediately */
	if (req->crp->crp_etype)
		goto done;

	switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
#if defined(HAVE_AHASH)
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
		/* async hash: copy the digest into the caller's buffer at crd_inject */
		crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
				req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
		ahash_request_free(req->crypto_req);
		break;
#endif
#if defined(HAVE_ABLKCIPHER)
	case SW_TYPE_ABLKCIPHER:
		ablkcipher_request_free(req->crypto_req);
		break;
#endif
	case SW_TYPE_CIPHER:
	case SW_TYPE_HMAC:
	case SW_TYPE_HASH:
	case SW_TYPE_COMP:
	case SW_TYPE_BLKCIPHER:
		/* synchronous types: nothing extra to collect */
		break;
	default:
		req->crp->crp_etype = EINVAL;
		goto done;
	}

	/* more descriptors in this request ? process the next one */
	req->crd = req->crd->crd_next;
	if (req->crd) {
		swcr_process_req(req);
		return;
	}

done:
	dprintk("%s crypto_done %p\n", __FUNCTION__, req);
	crypto_done(req->crp);
	kmem_cache_free(swcr_req_cache, req);
}
672 | + | |
673 | +#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) | |
674 | +static void swcr_process_callback(struct crypto_async_request *creq, int err) | |
675 | +{ | |
676 | + struct swcr_req *req = creq->data; | |
677 | + | |
678 | + dprintk("%s()\n", __FUNCTION__); | |
679 | + if (err) { | |
680 | + if (err == -EINPROGRESS) | |
681 | + return; | |
682 | + dprintk("%s() fail %d\n", __FUNCTION__, -err); | |
683 | + req->crp->crp_etype = -err; | |
684 | + } | |
685 | + | |
686 | + swcr_process_req_complete(req); | |
687 | +} | |
688 | +#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */ | |
689 | + | |
690 | + | |
/*
 * Execute a single crypto descriptor (req->crd) of an OCF request.
 * Builds a scatterlist over the relevant region of the request buffer
 * (skb, iovec or contiguous) and dispatches to the Linux crypto API.
 * Asynchronous back-ends return here early and complete later through
 * swcr_process_callback(); synchronous paths fall through to
 * swcr_process_req_complete() at "done".
 */
static void swcr_process_req(struct swcr_req *req)
{
	struct swcr_data *sw;
	struct cryptop *crp = req->crp;
	struct cryptodesc *crd = req->crd;
	struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
	struct uio *uiop = (struct uio *) crp->crp_buf;
	int sg_num, sg_len, skip;

	dprintk("%s()\n", __FUNCTION__);

	/*
	 * Find the crypto context.
	 *
	 * XXX Note that the logic here prevents us from having
	 * XXX the same algorithm multiple times in a session
	 * XXX (or rather, we can but it won't give us the right
	 * XXX results). To do that, we'd need some way of differentiating
	 * XXX between the various instances of an algorithm (so we can
	 * XXX locate the correct crypto context).
	 */
	for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
		;

	/* No such context ? */
	if (sw == NULL) {
		crp->crp_etype = EINVAL;
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		goto done;
	}

	/*
	 * for some types we need to ensure only one user as info is stored in
	 * the tfm during an operation that can get corrupted
	 */
	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
#endif
	case SW_TYPE_HMAC:
	case SW_TYPE_HASH: {
		unsigned long flags;
		spin_lock_irqsave(&sw->sw_tfm_lock, flags);
		if (sw->sw_type & SW_TYPE_INUSE) {
			/* tfm busy: requeue this request rather than blocking */
			spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
			execute_later((void (*)(void *))swcr_process_req, (void *)req);
			return;
		}
		/* marker is cleared in swcr_process_req_complete() */
		sw->sw_type |= SW_TYPE_INUSE;
		spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
	} break;
	}

	req->sw = sw;
	skip = crd->crd_skip;

	/*
	 * setup the SG list skip from the start of the buffer
	 */
	memset(req->sg, 0, sizeof(req->sg));
	sg_init_table(req->sg, SCATTERLIST_MAX);
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		int i, len;

		sg_num = 0;
		sg_len = 0;

		/* linear (head) part of the skb first */
		if (skip < skb_headlen(skb)) {
			len = skb_headlen(skb) - skip;
			if (len + sg_len > crd->crd_len)
				len = crd->crd_len - sg_len;
			sg_set_page(&req->sg[sg_num],
					virt_to_page(skb->data + skip), len,
					offset_in_page(skb->data + skip));
			sg_len += len;
			sg_num++;
			skip = 0;
		} else
			skip -= skb_headlen(skb);

		/* then the paged fragments */
		for (i = 0; sg_len < crd->crd_len &&
				i < skb_shinfo(skb)->nr_frags &&
				sg_num < SCATTERLIST_MAX; i++) {
			if (skip < skb_shinfo(skb)->frags[i].size) {
				len = skb_shinfo(skb)->frags[i].size - skip;
				if (len + sg_len > crd->crd_len)
					len = crd->crd_len - sg_len;
				sg_set_page(&req->sg[sg_num],
						skb_frag_page(&skb_shinfo(skb)->frags[i]),
						len,
						skb_shinfo(skb)->frags[i].page_offset + skip);
				sg_len += len;
				sg_num++;
				skip = 0;
			} else
				skip -= skb_shinfo(skb)->frags[i].size;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		int len;

		sg_len = 0;
		for (sg_num = 0; sg_len < crd->crd_len &&
				sg_num < uiop->uio_iovcnt &&
				sg_num < SCATTERLIST_MAX; sg_num++) {
			/*
			 * NOTE(review): '<=' admits a zero-length SG entry when
			 * skip == iov_len; the skb path above uses '<' for the
			 * analogous test — confirm this asymmetry is intended.
			 */
			if (skip <= uiop->uio_iov[sg_num].iov_len) {
				len = uiop->uio_iov[sg_num].iov_len - skip;
				if (len + sg_len > crd->crd_len)
					len = crd->crd_len - sg_len;
				sg_set_page(&req->sg[sg_num],
						virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
						len,
						offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
				sg_len += len;
				skip = 0;
			} else
				skip -= uiop->uio_iov[sg_num].iov_len;
		}
	} else {
		/* contiguous buffer: a single SG entry covers it */
		sg_len = (crp->crp_ilen - skip);
		if (sg_len > crd->crd_len)
			sg_len = crd->crd_len;
		sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
				sg_len, offset_in_page(crp->crp_buf + skip));
		sg_num = 1;
	}
	if (sg_num > 0)
		sg_mark_end(&req->sg[sg_num-1]);

	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {

#ifdef HAVE_AHASH
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
		{
		int ret;

		/* check we have room for the result */
		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
					crd->crd_inject, sw->u.hmac.sw_mlen);
			crp->crp_etype = EINVAL;
			goto done;
		}

		req->crypto_req =
				ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm),GFP_ATOMIC);
		if (!req->crypto_req) {
			crp->crp_etype = ENOMEM;
			dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
			goto done;
		}

		ahash_request_set_callback(req->crypto_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

		memset(req->result, 0, sizeof(req->result));

		if (sw->sw_type & SW_TYPE_AHMAC)
			crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
					sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
		ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
		ret = crypto_ahash_digest(req->crypto_req);
		switch (ret) {
		case -EINPROGRESS:
		case -EBUSY:
			/* async operation in flight; callback will complete it */
			return;
		default:
		case 0:
			dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
			crp->crp_etype = ret;
			goto done;
		}
		} break;
#endif /* HAVE_AHASH */

#ifdef HAVE_ABLKCIPHER
	case SW_TYPE_ABLKCIPHER: {
		int ret;
		unsigned char *ivp = req->iv;
		int ivsize =
			crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

		/* refuse input shorter than one cipher block */
		if (sg_len < crypto_ablkcipher_blocksize(
				__crypto_ablkcipher_cast(sw->sw_tfm))) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
					sg_len, crypto_ablkcipher_blocksize(
						__crypto_ablkcipher_cast(sw->sw_tfm)));
			goto done;
		}

		if (ivsize > sizeof(req->iv)) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
			goto done;
		}

		req->crypto_req = ablkcipher_request_alloc(
				__crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
		if (!req->crypto_req) {
			crp->crp_etype = ENOMEM;
			dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
					__FILE__, __LINE__);
			goto done;
		}

		ablkcipher_request_set_callback(req->crypto_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

		/* per-operation key supplied by the caller ? */
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			int i, error;

			if (debug) {
				dprintk("%s key:", __FUNCTION__);
				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
							crd->crd_key[i] & 0xff);
				dprintk("\n");
			}
			/* OCF doesn't enforce keys */
			crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
					CRYPTO_TFM_REQ_WEAK_KEY);
			error = crypto_ablkcipher_setkey(
						__crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
						(crd->crd_klen + 7) / 8);
			if (error) {
				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
						error, sw->sw_tfm->crt_flags);
				crp->crp_etype = -error;
			}
		}

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

			/* explicit IV, otherwise generate a random one */
			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				get_random_bytes(ivp, ivsize);
			/*
			 * do we have to copy the IV back to the buffer ?
			 */
			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
					sg_len, ivp);
			ret = crypto_ablkcipher_encrypt(req->crypto_req);

		} else { /*decrypt */

			/* explicit IV, otherwise read it from the buffer */
			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
					sg_len, ivp);
			ret = crypto_ablkcipher_decrypt(req->crypto_req);
		}

		switch (ret) {
		case -EINPROGRESS:
		case -EBUSY:
			/* async operation in flight; callback will complete it */
			return;
		default:
		case 0:
			dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
			crp->crp_etype = ret;
			goto done;
		}
		} break;
#endif /* HAVE_ABLKCIPHER */

	case SW_TYPE_BLKCIPHER: {
		unsigned char iv[EALG_MAX_BLOCK_LEN];
		unsigned char *ivp = iv;
		struct blkcipher_desc desc;
		int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

		/* refuse input shorter than one cipher block */
		if (sg_len < crypto_blkcipher_blocksize(
				crypto_blkcipher_cast(sw->sw_tfm))) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
					sg_len, crypto_blkcipher_blocksize(
						crypto_blkcipher_cast(sw->sw_tfm)));
			goto done;
		}

		if (ivsize > sizeof(iv)) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
			goto done;
		}

		/* per-operation key supplied by the caller ? */
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			int i, error;

			if (debug) {
				dprintk("%s key:", __FUNCTION__);
				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
							crd->crd_key[i] & 0xff);
				dprintk("\n");
			}
			/* OCF doesn't enforce keys */
			crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
					CRYPTO_TFM_REQ_WEAK_KEY);
			error = crypto_blkcipher_setkey(
						crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
						(crd->crd_klen + 7) / 8);
			if (error) {
				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
						error, sw->sw_tfm->crt_flags);
				crp->crp_etype = -error;
			}
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

			/* explicit IV, otherwise generate a random one */
			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
				ivp = crd->crd_iv;
			} else {
				get_random_bytes(ivp, ivsize);
			}
			/*
			 * do we have to copy the IV back to the buffer ?
			 */
			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			desc.info = ivp;
			crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);

		} else { /*decrypt */

			/* explicit IV, otherwise read it from the buffer */
			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
				ivp = crd->crd_iv;
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			desc.info = ivp;
			crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
		}
		} break;

	case SW_TYPE_HMAC:
	case SW_TYPE_HASH:
		{
		char result[HASH_MAX_LEN];
		struct hash_desc desc;

		/* check we have room for the result */
		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
					crd->crd_inject, sw->u.hmac.sw_mlen);
			crp->crp_etype = EINVAL;
			goto done;
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = crypto_hash_cast(sw->sw_tfm);

		memset(result, 0, sizeof(result));

		if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
			crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
					req->sg, sg_num, result);
#else
			crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
					sw->u.hmac.sw_klen);
			crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */

		} else { /* SW_TYPE_HASH */
			crypto_hash_digest(&desc, req->sg, sg_len, result);
		}

		/* copy the digest into the caller's buffer at crd_inject */
		crypto_copyback(crp->crp_flags, crp->crp_buf,
				crd->crd_inject, sw->u.hmac.sw_mlen, result);
		}
		break;

	case SW_TYPE_COMP: {
		void *ibuf = NULL;
		void *obuf = sw->u.sw_comp_buf;
		int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
		int ret = 0;

		/*
		 * we need to use an additional copy if there is more than one
		 * input chunk since the kernel comp routines do not handle
		 * SG yet.  Otherwise we just use the input buffer as is.
		 * Rather than allocate another buffer we just split the tmp
		 * buffer we already have.
		 * Perhaps we should just use zlib directly ?
		 */
		if (sg_num > 1) {
			int blk;

			ibuf = obuf;
			for (blk = 0; blk < sg_num; blk++) {
				memcpy(obuf, sg_virt(&req->sg[blk]),
						req->sg[blk].length);
				obuf += req->sg[blk].length;
			}
			olen -= sg_len;
		} else
			ibuf = sg_virt(&req->sg[0]);

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
			ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
					ibuf, ilen, obuf, &olen);
			if (!ret && olen > crd->crd_len) {
				dprintk("cryptosoft: ERANGE compress %d into %d\n",
						crd->crd_len, olen);
				if (swcr_fail_if_compression_grows)
					ret = ERANGE;
			}
		} else { /* decompress */
			ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
					ibuf, ilen, obuf, &olen);
			if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
				dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
						"space for %d,at offset %d\n",
						crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
				ret = ETOOSMALL;
			}
		}
		if (ret)
			dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

		/*
		 * on success copy result back,
		 * linux crypto API returns -errno, we need to fix that
		 */
		crp->crp_etype = ret < 0 ? -ret : ret;
		if (ret == 0) {
			/* copy back the result and return it's size */
			crypto_copyback(crp->crp_flags, crp->crp_buf,
					crd->crd_inject, olen, obuf);
			crp->crp_olen = olen;
		}
		} break;

	default:
		/* Unknown/unsupported algorithm */
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

done:
	swcr_process_req_complete(req);
}
1155 | + | |
1156 | + | |
1157 | +/* | |
1158 | + * Process a crypto request. | |
1159 | + */ | |
1160 | +static int | |
1161 | +swcr_process(device_t dev, struct cryptop *crp, int hint) | |
1162 | +{ | |
1163 | + struct swcr_req *req = NULL; | |
1164 | + u_int32_t lid; | |
1165 | + | |
1166 | + dprintk("%s()\n", __FUNCTION__); | |
1167 | + /* Sanity check */ | |
1168 | + if (crp == NULL) { | |
1169 | + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); | |
1170 | + return EINVAL; | |
1171 | + } | |
1172 | + | |
1173 | + crp->crp_etype = 0; | |
1174 | + | |
1175 | + if (crp->crp_desc == NULL || crp->crp_buf == NULL) { | |
1176 | + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); | |
1177 | + crp->crp_etype = EINVAL; | |
1178 | + goto done; | |
1179 | + } | |
1180 | + | |
1181 | + lid = crp->crp_sid & 0xffffffff; | |
1182 | + if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL || | |
1183 | + swcr_sessions[lid] == NULL) { | |
1184 | + crp->crp_etype = ENOENT; | |
1185 | + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__); | |
1186 | + goto done; | |
1187 | + } | |
1188 | + | |
1189 | + /* | |
1190 | + * do some error checking outside of the loop for SKB and IOV processing | |
1191 | + * this leaves us with valid skb or uiop pointers for later | |
1192 | + */ | |
1193 | + if (crp->crp_flags & CRYPTO_F_SKBUF) { | |
1194 | + struct sk_buff *skb = (struct sk_buff *) crp->crp_buf; | |
1195 | + if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) { | |
1196 | + printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__, | |
1197 | + skb_shinfo(skb)->nr_frags); | |
1198 | + goto done; | |
1199 | + } | |
1200 | + } else if (crp->crp_flags & CRYPTO_F_IOV) { | |
1201 | + struct uio *uiop = (struct uio *) crp->crp_buf; | |
1202 | + if (uiop->uio_iovcnt > SCATTERLIST_MAX) { | |
1203 | + printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__, | |
1204 | + uiop->uio_iovcnt); | |
1205 | + goto done; | |
1206 | + } | |
1207 | + } | |
1208 | + | |
1209 | + /* | |
1210 | + * setup a new request ready for queuing | |
1211 | + */ | |
1212 | + req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC); | |
1213 | + if (req == NULL) { | |
1214 | + dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__); | |
1215 | + crp->crp_etype = ENOMEM; | |
1216 | + goto done; | |
1217 | + } | |
1218 | + memset(req, 0, sizeof(*req)); | |
1219 | + | |
1220 | + req->sw_head = swcr_sessions[lid]; | |
1221 | + req->crp = crp; | |
1222 | + req->crd = crp->crp_desc; | |
1223 | + | |
1224 | + swcr_process_req(req); | |
1225 | + return 0; | |
1226 | + | |
1227 | +done: | |
1228 | + crypto_done(crp); | |
1229 | + if (req) | |
1230 | + kmem_cache_free(swcr_req_cache, req); | |
1231 | + return 0; | |
1232 | +} | |
1233 | + | |
1234 | + | |
1235 | +static int | |
1236 | +cryptosoft_init(void) | |
1237 | +{ | |
1238 | + int i, sw_type, mode; | |
1239 | + char *algo; | |
1240 | + | |
1241 | + dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init); | |
1242 | + | |
1243 | + swcr_req_cache = kmem_cache_create("cryptosoft_req", | |
1244 | + sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL | |
1245 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) | |
1246 | + , NULL | |
1247 | +#endif | |
1248 | + ); | |
1249 | + if (!swcr_req_cache) { | |
1250 | + printk("cryptosoft: failed to create request cache\n"); | |
1251 | + return -ENOENT; | |
1252 | + } | |
1253 | + | |
1254 | + softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods); | |
1255 | + | |
1256 | + swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc), | |
1257 | + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); | |
1258 | + if (swcr_id < 0) { | |
1259 | + printk("cryptosoft: Software crypto device cannot initialize!"); | |
1260 | + return -ENODEV; | |
1261 | + } | |
1262 | + | |
1263 | +#define REGISTER(alg) \ | |
1264 | + crypto_register(swcr_id, alg, 0,0) | |
1265 | + | |
1266 | + for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) { | |
1267 | + int found; | |
1268 | + | |
1269 | + algo = crypto_details[i].alg_name; | |
1270 | + if (!algo || !*algo) { | |
1271 | + dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i); | |
1272 | + continue; | |
1273 | + } | |
1274 | + | |
1275 | + mode = crypto_details[i].mode; | |
1276 | + sw_type = crypto_details[i].sw_type; | |
1277 | + | |
1278 | + found = 0; | |
1279 | + switch (sw_type & SW_TYPE_ALG_MASK) { | |
1280 | + case SW_TYPE_CIPHER: | |
1281 | + found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC); | |
1282 | + break; | |
1283 | + case SW_TYPE_HMAC: | |
1284 | + found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0); | |
1285 | + break; | |
1286 | + case SW_TYPE_HASH: | |
1287 | + found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0); | |
1288 | + break; | |
1289 | + case SW_TYPE_COMP: | |
1290 | + found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC); | |
1291 | + break; | |
1292 | + case SW_TYPE_BLKCIPHER: | |
1293 | + found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); | |
1294 | + if (!found && !swcr_no_ablk) | |
1295 | + found = crypto_has_ablkcipher(algo, 0, 0); | |
1296 | + break; | |
1297 | + } | |
1298 | + if (found) { | |
1299 | + REGISTER(i); | |
1300 | + } else { | |
1301 | + dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n", | |
1302 | + __FUNCTION__, sw_type, i, algo); | |
1303 | + } | |
1304 | + } | |
1305 | + return 0; | |
1306 | +} | |
1307 | + | |
/*
 * Module teardown: deregister every algorithm we registered with OCF,
 * then release the request cache.  Unregistering first ensures no new
 * requests can be taken from the cache while it is being destroyed
 * (assumes all in-flight requests have completed -- TODO confirm the
 * OCF core guarantees this before module exit).
 */
static void
cryptosoft_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);
	/* drops every crypto_register()ed algorithm and the driver id */
	crypto_unregister_all(swcr_id);
	swcr_id = -1;	/* mark the driver as no longer registered */
	kmem_cache_destroy(swcr_req_cache);
}
1316 | + | |
/*
 * late_initcall: presumably so the in-kernel crypto algorithms this
 * module wraps are registered before we probe for them -- confirm.
 */
late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
crypto/ocf/ocf-bench.c
1 | +/* | |
2 | + * A loadable module that benchmarks the OCF crypto speed from kernel space. | |
3 | + * | |
4 | + * Copyright (C) 2004-2010 David McCullough <david_mccullough@mcafee.com> | |
5 | + * | |
6 | + * LICENSE TERMS | |
7 | + * | |
8 | + * The free distribution and use of this software in both source and binary | |
9 | + * form is allowed (with or without changes) provided that: | |
10 | + * | |
11 | + * 1. distributions of this source code include the above copyright | |
12 | + * notice, this list of conditions and the following disclaimer; | |
13 | + * | |
14 | + * 2. distributions in binary form include the above copyright | |
15 | + * notice, this list of conditions and the following disclaimer | |
16 | + * in the documentation and/or other associated materials; | |
17 | + * | |
18 | + * 3. the copyright holder's name is not used to endorse products | |
19 | + * built using this software without specific written permission. | |
20 | + * | |
21 | + * ALTERNATIVELY, provided that this notice is retained in full, this product | |
22 | + * may be distributed under the terms of the GNU General Public License (GPL), | |
23 | + * in which case the provisions of the GPL apply INSTEAD OF those given above. | |
24 | + * | |
25 | + * DISCLAIMER | |
26 | + * | |
27 | + * This software is provided 'as is' with no explicit or implied warranties | |
28 | + * in respect of its properties, including, but not limited to, correctness | |
29 | + * and/or fitness for purpose. | |
30 | + */ | |
31 | + | |
32 | + | |
33 | +#include <linux/version.h> | |
34 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
35 | +#include <linux/config.h> | |
36 | +#endif | |
37 | +#include <linux/module.h> | |
38 | +#include <linux/init.h> | |
39 | +#include <linux/list.h> | |
40 | +#include <linux/slab.h> | |
41 | +#include <linux/wait.h> | |
42 | +#include <linux/sched.h> | |
43 | +#include <linux/spinlock.h> | |
44 | +#include <linux/interrupt.h> | |
45 | +#include <cryptodev.h> | |
46 | + | |
47 | +#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK | |
48 | +#define BENCH_IXP_ACCESS_LIB 1 | |
49 | +#endif | |
50 | +#ifdef BENCH_IXP_ACCESS_LIB | |
51 | +#include <IxTypes.h> | |
52 | +#include <IxOsBuffMgt.h> | |
53 | +#include <IxNpeDl.h> | |
54 | +#include <IxCryptoAcc.h> | |
55 | +#include <IxQMgr.h> | |
56 | +#include <IxOsServices.h> | |
57 | +#include <IxOsCacheMMU.h> | |
58 | +#endif | |
59 | + | |
/*
 * support for access lib version 1.4
 */
#ifndef IX_MBUF_PRIV
#define IX_MBUF_PRIV(x) ((x)->priv)
#endif

/*
 * the number of simultaneously active requests
 */
static int request_q_len = 40;
module_param(request_q_len, int, 0);
MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");

/*
 * how many requests we want to have processed
 */
static int request_num = 1024;
module_param(request_num, int, 0);
MODULE_PARM_DESC(request_num, "run for at least this many requests");

/*
 * the size of each request
 */
static int request_size = 1488;
module_param(request_size, int, 0);
MODULE_PARM_DESC(request_size, "size of each request");

/*
 * OCF batching of requests
 */
static int request_batch = 1;
module_param(request_batch, int, 0);
MODULE_PARM_DESC(request_batch, "enable OCF request batching");

/*
 * OCF immediate callback on completion
 */
static int request_cbimm = 1;
module_param(request_cbimm, int, 0);
MODULE_PARM_DESC(request_cbimm, "enable OCF immediate callback on completion");

/*
 * a structure for each request
 */
typedef struct {
	struct work_struct work;	/* resubmission deferred via workqueue */
#ifdef BENCH_IXP_ACCESS_LIB
	IX_MBUF mbuf;			/* buffer descriptor for the IXP path */
#endif
	unsigned char *buffer;		/* payload plus room for digest/IV */
} request_t;

static request_t *requests;

/* protects the two benchmark counters below (updated from callbacks) */
static spinlock_t ocfbench_counter_lock;
static int outstanding;			/* requests currently in flight */
static int total;			/* requests completed in this run */

/*************************************************************************/
/*
 * OCF benchmark routines
 */

static uint64_t ocf_cryptoid;		/* session id from crypto_newsession() */
static unsigned long jstart, jstop;	/* jiffies at benchmark start/stop */

static int ocf_init(void);
static int ocf_cb(struct cryptop *crp);
static void ocf_request(void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void ocf_request_wq(struct work_struct *work);
#endif
133 | + | |
134 | +static int | |
135 | +ocf_init(void) | |
136 | +{ | |
137 | + int error; | |
138 | + struct cryptoini crie, cria; | |
139 | + struct cryptodesc crda, crde; | |
140 | + | |
141 | + memset(&crie, 0, sizeof(crie)); | |
142 | + memset(&cria, 0, sizeof(cria)); | |
143 | + memset(&crde, 0, sizeof(crde)); | |
144 | + memset(&crda, 0, sizeof(crda)); | |
145 | + | |
146 | + cria.cri_alg = CRYPTO_SHA1_HMAC; | |
147 | + cria.cri_klen = 20 * 8; | |
148 | + cria.cri_key = "0123456789abcdefghij"; | |
149 | + | |
150 | + //crie.cri_alg = CRYPTO_3DES_CBC; | |
151 | + crie.cri_alg = CRYPTO_AES_CBC; | |
152 | + crie.cri_klen = 24 * 8; | |
153 | + crie.cri_key = "0123456789abcdefghijklmn"; | |
154 | + | |
155 | + crie.cri_next = &cria; | |
156 | + | |
157 | + error = crypto_newsession(&ocf_cryptoid, &crie, | |
158 | + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); | |
159 | + if (error) { | |
160 | + printk("crypto_newsession failed %d\n", error); | |
161 | + return -1; | |
162 | + } | |
163 | + return 0; | |
164 | +} | |
165 | + | |
166 | +static int | |
167 | +ocf_cb(struct cryptop *crp) | |
168 | +{ | |
169 | + request_t *r = (request_t *) crp->crp_opaque; | |
170 | + unsigned long flags; | |
171 | + | |
172 | + if (crp->crp_etype) | |
173 | + printk("Error in OCF processing: %d\n", crp->crp_etype); | |
174 | + crypto_freereq(crp); | |
175 | + crp = NULL; | |
176 | + | |
177 | + /* do all requests but take at least 1 second */ | |
178 | + spin_lock_irqsave(&ocfbench_counter_lock, flags); | |
179 | + total++; | |
180 | + if (total > request_num && jstart + HZ < jiffies) { | |
181 | + outstanding--; | |
182 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
183 | + return 0; | |
184 | + } | |
185 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
186 | + | |
187 | + schedule_work(&r->work); | |
188 | + return 0; | |
189 | +} | |
190 | + | |
191 | + | |
192 | +static void | |
193 | +ocf_request(void *arg) | |
194 | +{ | |
195 | + request_t *r = arg; | |
196 | + struct cryptop *crp = crypto_getreq(2); | |
197 | + struct cryptodesc *crde, *crda; | |
198 | + unsigned long flags; | |
199 | + | |
200 | + if (!crp) { | |
201 | + spin_lock_irqsave(&ocfbench_counter_lock, flags); | |
202 | + outstanding--; | |
203 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
204 | + return; | |
205 | + } | |
206 | + | |
207 | + crde = crp->crp_desc; | |
208 | + crda = crde->crd_next; | |
209 | + | |
210 | + crda->crd_skip = 0; | |
211 | + crda->crd_flags = 0; | |
212 | + crda->crd_len = request_size; | |
213 | + crda->crd_inject = request_size; | |
214 | + crda->crd_alg = CRYPTO_SHA1_HMAC; | |
215 | + crda->crd_key = "0123456789abcdefghij"; | |
216 | + crda->crd_klen = 20 * 8; | |
217 | + | |
218 | + crde->crd_skip = 0; | |
219 | + crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT; | |
220 | + crde->crd_len = request_size; | |
221 | + crde->crd_inject = request_size; | |
222 | + //crde->crd_alg = CRYPTO_3DES_CBC; | |
223 | + crde->crd_alg = CRYPTO_AES_CBC; | |
224 | + crde->crd_key = "0123456789abcdefghijklmn"; | |
225 | + crde->crd_klen = 24 * 8; | |
226 | + | |
227 | + crp->crp_ilen = request_size + 64; | |
228 | + crp->crp_flags = 0; | |
229 | + if (request_batch) | |
230 | + crp->crp_flags |= CRYPTO_F_BATCH; | |
231 | + if (request_cbimm) | |
232 | + crp->crp_flags |= CRYPTO_F_CBIMM; | |
233 | + crp->crp_buf = (caddr_t) r->buffer; | |
234 | + crp->crp_callback = ocf_cb; | |
235 | + crp->crp_sid = ocf_cryptoid; | |
236 | + crp->crp_opaque = (caddr_t) r; | |
237 | + crypto_dispatch(crp); | |
238 | +} | |
239 | + | |
240 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) | |
241 | +static void | |
242 | +ocf_request_wq(struct work_struct *work) | |
243 | +{ | |
244 | + request_t *r = container_of(work, request_t, work); | |
245 | + ocf_request(r); | |
246 | +} | |
247 | +#endif | |
248 | + | |
/* Tear down the OCF session created by ocf_init(). */
static void
ocf_done(void)
{
	crypto_freesession(ocf_cryptoid);
}
254 | + | |
255 | +/*************************************************************************/ | |
256 | +#ifdef BENCH_IXP_ACCESS_LIB | |
257 | +/*************************************************************************/ | |
258 | +/* | |
259 | + * CryptoAcc benchmark routines | |
260 | + */ | |
261 | + | |
262 | +static IxCryptoAccCtx ixp_ctx; | |
263 | +static UINT32 ixp_ctx_id; | |
264 | +static IX_MBUF ixp_pri; | |
265 | +static IX_MBUF ixp_sec; | |
266 | +static int ixp_registered = 0; | |
267 | + | |
268 | +static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, | |
269 | + IxCryptoAccStatus status); | |
270 | +static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp, | |
271 | + IxCryptoAccStatus status); | |
272 | +static void ixp_request(void *arg); | |
273 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) | |
274 | +static void ixp_request_wq(struct work_struct *work); | |
275 | +#endif | |
276 | + | |
277 | +static int | |
278 | +ixp_init(void) | |
279 | +{ | |
280 | + IxCryptoAccStatus status; | |
281 | + | |
282 | + ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES; | |
283 | + ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC; | |
284 | + ixp_ctx.cipherCtx.cipherKeyLen = 24; | |
285 | + ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64; | |
286 | + ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64; | |
287 | + memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24); | |
288 | + | |
289 | + ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1; | |
290 | + ixp_ctx.authCtx.authDigestLen = 12; | |
291 | + ixp_ctx.authCtx.aadLen = 0; | |
292 | + ixp_ctx.authCtx.authKeyLen = 20; | |
293 | + memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20); | |
294 | + | |
295 | + ixp_ctx.useDifferentSrcAndDestMbufs = 0; | |
296 | + ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ; | |
297 | + | |
298 | + IX_MBUF_MLEN(&ixp_pri) = IX_MBUF_PKT_LEN(&ixp_pri) = 128; | |
299 | + IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC); | |
300 | + IX_MBUF_MLEN(&ixp_sec) = IX_MBUF_PKT_LEN(&ixp_sec) = 128; | |
301 | + IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC); | |
302 | + | |
303 | + status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec, | |
304 | + ixp_register_cb, ixp_perform_cb, &ixp_ctx_id); | |
305 | + | |
306 | + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) { | |
307 | + while (!ixp_registered) | |
308 | + schedule(); | |
309 | + return ixp_registered < 0 ? -1 : 0; | |
310 | + } | |
311 | + | |
312 | + printk("ixp: ixCryptoAccCtxRegister failed %d\n", status); | |
313 | + return -1; | |
314 | +} | |
315 | + | |
316 | +static void | |
317 | +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status) | |
318 | +{ | |
319 | + if (bufp) { | |
320 | + IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0; | |
321 | + kfree(IX_MBUF_MDATA(bufp)); | |
322 | + IX_MBUF_MDATA(bufp) = NULL; | |
323 | + } | |
324 | + | |
325 | + if (IX_CRYPTO_ACC_STATUS_WAIT == status) | |
326 | + return; | |
327 | + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) | |
328 | + ixp_registered = 1; | |
329 | + else | |
330 | + ixp_registered = -1; | |
331 | +} | |
332 | + | |
333 | +static void | |
334 | +ixp_perform_cb( | |
335 | + UINT32 ctx_id, | |
336 | + IX_MBUF *sbufp, | |
337 | + IX_MBUF *dbufp, | |
338 | + IxCryptoAccStatus status) | |
339 | +{ | |
340 | + request_t *r = NULL; | |
341 | + unsigned long flags; | |
342 | + | |
343 | + /* do all requests but take at least 1 second */ | |
344 | + spin_lock_irqsave(&ocfbench_counter_lock, flags); | |
345 | + total++; | |
346 | + if (total > request_num && jstart + HZ < jiffies) { | |
347 | + outstanding--; | |
348 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
349 | + return; | |
350 | + } | |
351 | + | |
352 | + if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) { | |
353 | + printk("crappo %p %p\n", sbufp, r); | |
354 | + outstanding--; | |
355 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
356 | + return; | |
357 | + } | |
358 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
359 | + | |
360 | + schedule_work(&r->work); | |
361 | +} | |
362 | + | |
363 | +static void | |
364 | +ixp_request(void *arg) | |
365 | +{ | |
366 | + request_t *r = arg; | |
367 | + IxCryptoAccStatus status; | |
368 | + unsigned long flags; | |
369 | + | |
370 | + memset(&r->mbuf, 0, sizeof(r->mbuf)); | |
371 | + IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64; | |
372 | + IX_MBUF_MDATA(&r->mbuf) = r->buffer; | |
373 | + IX_MBUF_PRIV(&r->mbuf) = r; | |
374 | + status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL, | |
375 | + 0, request_size, 0, request_size, request_size, r->buffer); | |
376 | + if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) { | |
377 | + printk("status1 = %d\n", status); | |
378 | + spin_lock_irqsave(&ocfbench_counter_lock, flags); | |
379 | + outstanding--; | |
380 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
381 | + return; | |
382 | + } | |
383 | + return; | |
384 | +} | |
385 | + | |
386 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) | |
387 | +static void | |
388 | +ixp_request_wq(struct work_struct *work) | |
389 | +{ | |
390 | + request_t *r = container_of(work, request_t, work); | |
391 | + ixp_request(r); | |
392 | +} | |
393 | +#endif | |
394 | + | |
/*
 * IXP teardown.  NOTE(review): the CryptoAcc context registered in
 * ixp_init() is deliberately never unregistered (see comment below) --
 * tolerable for a load-once benchmark module, but a leak if re-run.
 */
static void
ixp_done(void)
{
	/* we should free the session here but I am lazy :-) */
}
400 | + | |
401 | +/*************************************************************************/ | |
402 | +#endif /* BENCH_IXP_ACCESS_LIB */ | |
403 | +/*************************************************************************/ | |
404 | + | |
405 | +int | |
406 | +ocfbench_init(void) | |
407 | +{ | |
408 | + int i; | |
409 | + unsigned long mbps; | |
410 | + unsigned long flags; | |
411 | + | |
412 | + printk("Crypto Speed tests\n"); | |
413 | + | |
414 | + requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL); | |
415 | + if (!requests) { | |
416 | + printk("malloc failed\n"); | |
417 | + return -EINVAL; | |
418 | + } | |
419 | + | |
420 | + for (i = 0; i < request_q_len; i++) { | |
421 | + /* +64 for return data */ | |
422 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) | |
423 | + INIT_WORK(&requests[i].work, ocf_request_wq); | |
424 | +#else | |
425 | + INIT_WORK(&requests[i].work, ocf_request, &requests[i]); | |
426 | +#endif | |
427 | + requests[i].buffer = kmalloc(request_size + 128, GFP_DMA); | |
428 | + if (!requests[i].buffer) { | |
429 | + printk("malloc failed\n"); | |
430 | + return -EINVAL; | |
431 | + } | |
432 | + memset(requests[i].buffer, '0' + i, request_size + 128); | |
433 | + } | |
434 | + | |
435 | + /* | |
436 | + * OCF benchmark | |
437 | + */ | |
438 | + printk("OCF: testing ...\n"); | |
439 | + if (ocf_init() == -1) | |
440 | + return -EINVAL; | |
441 | + | |
442 | + spin_lock_init(&ocfbench_counter_lock); | |
443 | + total = outstanding = 0; | |
444 | + jstart = jiffies; | |
445 | + for (i = 0; i < request_q_len; i++) { | |
446 | + spin_lock_irqsave(&ocfbench_counter_lock, flags); | |
447 | + outstanding++; | |
448 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
449 | + ocf_request(&requests[i]); | |
450 | + } | |
451 | + while (outstanding > 0) | |
452 | + schedule(); | |
453 | + jstop = jiffies; | |
454 | + | |
455 | + mbps = 0; | |
456 | + if (jstop > jstart) { | |
457 | + mbps = (unsigned long) total * (unsigned long) request_size * 8; | |
458 | + mbps /= ((jstop - jstart) * 1000) / HZ; | |
459 | + } | |
460 | + printk("OCF: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n", | |
461 | + total, request_size, (int)(jstop - jstart), | |
462 | + ((int)mbps) / 1000, ((int)mbps) % 1000); | |
463 | + ocf_done(); | |
464 | + | |
465 | +#ifdef BENCH_IXP_ACCESS_LIB | |
466 | + /* | |
467 | + * IXP benchmark | |
468 | + */ | |
469 | + printk("IXP: testing ...\n"); | |
470 | + ixp_init(); | |
471 | + total = outstanding = 0; | |
472 | + jstart = jiffies; | |
473 | + for (i = 0; i < request_q_len; i++) { | |
474 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) | |
475 | + INIT_WORK(&requests[i].work, ixp_request_wq); | |
476 | +#else | |
477 | + INIT_WORK(&requests[i].work, ixp_request, &requests[i]); | |
478 | +#endif | |
479 | + spin_lock_irqsave(&ocfbench_counter_lock, flags); | |
480 | + outstanding++; | |
481 | + spin_unlock_irqrestore(&ocfbench_counter_lock, flags); | |
482 | + ixp_request(&requests[i]); | |
483 | + } | |
484 | + while (outstanding > 0) | |
485 | + schedule(); | |
486 | + jstop = jiffies; | |
487 | + | |
488 | + mbps = 0; | |
489 | + if (jstop > jstart) { | |
490 | + mbps = (unsigned long) total * (unsigned long) request_size * 8; | |
491 | + mbps /= ((jstop - jstart) * 1000) / HZ; | |
492 | + } | |
493 | + printk("IXP: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n", | |
494 | + total, request_size, jstop - jstart, | |
495 | + ((int)mbps) / 1000, ((int)mbps) % 1000); | |
496 | + ixp_done(); | |
497 | +#endif /* BENCH_IXP_ACCESS_LIB */ | |
498 | + | |
499 | + for (i = 0; i < request_q_len; i++) | |
500 | + kfree(requests[i].buffer); | |
501 | + kfree(requests); | |
502 | + return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */ | |
503 | +} | |
504 | + | |
/*
 * Nothing to release here: ocfbench_init() always returns -EINVAL, so
 * the module never remains loaded with resources still held.
 */
static void __exit ocfbench_exit(void)
{
}
508 | + | |
509 | +module_init(ocfbench_init); | |
510 | +module_exit(ocfbench_exit); | |
511 | + | |
512 | +MODULE_LICENSE("BSD"); | |
513 | +MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>"); | |
514 | +MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds"); |
crypto/ocf/ocf-compat.h
1 | +#ifndef _BSD_COMPAT_H_ | |
2 | +#define _BSD_COMPAT_H_ 1 | |
3 | +/****************************************************************************/ | |
4 | +/* | |
5 | + * Provide compat routines for older linux kernels and BSD kernels | |
6 | + * | |
7 | + * Written by David McCullough <david_mccullough@mcafee.com> | |
8 | + * Copyright (C) 2010 David McCullough <david_mccullough@mcafee.com> | |
9 | + * | |
10 | + * LICENSE TERMS | |
11 | + * | |
12 | + * The free distribution and use of this software in both source and binary | |
13 | + * form is allowed (with or without changes) provided that: | |
14 | + * | |
15 | + * 1. distributions of this source code include the above copyright | |
16 | + * notice, this list of conditions and the following disclaimer; | |
17 | + * | |
18 | + * 2. distributions in binary form include the above copyright | |
19 | + * notice, this list of conditions and the following disclaimer | |
20 | + * in the documentation and/or other associated materials; | |
21 | + * | |
22 | + * 3. the copyright holder's name is not used to endorse products | |
23 | + * built using this software without specific written permission. | |
24 | + * | |
25 | + * ALTERNATIVELY, provided that this notice is retained in full, this file | |
26 | + * may be distributed under the terms of the GNU General Public License (GPL), | |
27 | + * in which case the provisions of the GPL apply INSTEAD OF those given above. | |
28 | + * | |
29 | + * DISCLAIMER | |
30 | + * | |
31 | + * This software is provided 'as is' with no explicit or implied warranties | |
32 | + * in respect of its properties, including, but not limited to, correctness | |
33 | + * and/or fitness for purpose. | |
34 | + */ | |
35 | +/****************************************************************************/ | |
36 | +#ifdef __KERNEL__ | |
37 | +#include <linux/version.h> | |
38 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
39 | +#include <linux/config.h> | |
40 | +#endif | |
41 | + | |
/*
 * fake some BSD driver interface stuff specifically for OCF use
 */

/* opaque driver handle, mirroring FreeBSD's device_t */
typedef struct ocf_device *device_t;

/*
 * Method table for an OCF driver: session create/destroy plus the
 * symmetric (process) and asymmetric-key (kprocess) request handlers.
 */
typedef struct {
	int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
	int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
	int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
	int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
} device_method_t;
/* BSD DEVMETHOD spelled as a GNU "id: func" designated initialiser */
#define DEVMETHOD(id, func)	id: func

struct ocf_device {
	char name[32];		/* the driver name */
	char nameunit[32];	/* the driver name + HW instance */
	int unit;
	device_method_t methods;
	void *softc;		/* driver private state */
};

/* BSD-style method dispatch wrappers over the table above */
#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
	((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
#define CRYPTODEV_FREESESSION(dev, sid) \
	((*(dev)->methods.cryptodev_freesession)(dev, sid))
#define CRYPTODEV_PROCESS(dev, crp, hint) \
	((*(dev)->methods.cryptodev_process)(dev, crp, hint))
#define CRYPTODEV_KPROCESS(dev, krp, hint) \
	((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))

/* accessors matching the FreeBSD device API */
#define device_get_name(dev)	((dev)->name)
#define device_get_nameunit(dev)	((dev)->nameunit)
#define device_get_unit(dev)	((dev)->unit)
#define device_get_softc(dev)	((dev)->softc)
77 | + | |
/*
 * Embed a fake BSD device in a driver softc.  The trailing bare
 * "device_t" is completed by the member name the driver writes after
 * the macro, so "softc_device_decl sc_dev;" declares both the embedded
 * struct and a device_t pointer field immediately after it.
 */
#define softc_device_decl \
		struct ocf_device _device; \
		device_t

/*
 * Initialise the embedded device: copy name/unit, install the method
 * table, and point the pointer member declared by softc_device_decl
 * (located directly after _device) at the embedded struct.  The
 * "if (1) {...} else" form lets the macro take a trailing semicolon.
 *
 * Fix: the snprintf bound was sizeof(..._device.name) -- the wrong
 * member.  Harmless only because both arrays are 32 bytes; now bounded
 * by the field actually being written.
 */
#define softc_device_init(_sc, _name, _unit, _methods) \
	if (1) {\
	strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
	snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.nameunit), "%s%d", _name, _unit); \
	(_sc)->_device.unit = _unit; \
	(_sc)->_device.methods = _methods; \
	(_sc)->_device.softc = (void *) _sc; \
	*(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
	} else

#define softc_get_device(_sc)	(&(_sc)->_device)
93 | + | |
/*
 * iomem support for 2.4 and 2.6 kernels
 */
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define ocf_iomem_t	unsigned long

/*
 * implement simple workqueue like support for older kernels
 */

#include <linux/tqueue.h>

#define work_struct tq_struct

/* map the 2.4/2.6.0-19 three-argument INIT_WORK onto a task queue entry */
#define INIT_WORK(wp, fp, ap) \
	do { \
		(wp)->sync = 0; \
		(wp)->routine = (fp); \
		(wp)->data = (ap); \
	} while (0)

/* queue the work on the immediate bottom-half task queue */
#define schedule_work(wp) \
	do { \
		queue_task((wp), &tq_immediate); \
		mark_bh(IMMEDIATE_BH); \
	} while (0)

#define flush_scheduled_work()	run_task_queue(&tq_immediate)

#else
#define ocf_iomem_t	void __iomem *

#include <linux/workqueue.h>

#endif
130 | + | |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/fdtable.h>
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* pre-2.6.11 kernels keep the fd array directly in files_struct */
#define files_fdtable(files)	(files)
#endif

#ifdef MODULE_PARM
/* 2.4 kernels: route module_param() back to the old MODULE_PARM */
#undef module_param	/* just in case */
#define module_param(a,b,c)	MODULE_PARM(a,"i")
#endif

/* BSD memory helpers expressed via the Linux mem* functions */
#define bzero(s,l)	memset(s,0,l)
#define bcopy(s,d,l)	memcpy(d,s,l)
#define bcmp(x, y, l)	memcmp(x,y,l)

#define MIN(x,y)	((x) < (y) ? (x) : (y))

/* BSD device_printf(): prefix the message with the device name+unit */
#define device_printf(dev, a...) ({ \
				printk("%s: ", device_get_nameunit(dev)); printk(a); \
			})

#undef printf
#define printf(fmt...)	printk(fmt)

/* BSD KASSERT: log (rather than panic) when the condition fails */
#define KASSERT(c,p)	if (!(c)) { printk p ; } else

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
/* 2.4: daemonize() takes no name, so clear signals and set comm by hand */
#define ocf_daemonize(str) \
	daemonize(); \
	spin_lock_irq(&current->sigmask_lock); \
	sigemptyset(&current->blocked); \
	recalc_sigpending(current); \
	spin_unlock_irq(&current->sigmask_lock); \
	sprintf(current->comm, str);
#else
#define ocf_daemonize(str) daemonize(str);
#endif

/* minimal BSD TAILQ API on top of Linux list_head */
#define	TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
#define	TAILQ_EMPTY(q)	list_empty(q)
#define	TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)

#define read_random(p,l) get_random_bytes(p,l)

/* BSD DELAY() takes microseconds; use mdelay for long busy waits */
#define DELAY(x)	((x) > 2000 ? mdelay((x)/1000) : udelay(x))
#define strtoul	simple_strtoul

/* BSD PCI config accessors */
#define pci_get_vendor(dev)	((dev)->vendor)
#define pci_get_device(dev)	((dev)->device)

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
/* no consistent-mask API on 2.4; pretend success */
#define pci_set_consistent_dma_mask(dev, mask) (0)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
#define pci_dma_sync_single_for_cpu pci_dma_sync_single
#endif

#ifndef DMA_32BIT_MASK
#define DMA_32BIT_MASK  0x00000000ffffffffULL
#endif

/* BSD byte-order helpers in terms of the Linux cpu_to_* family */
#ifndef htole32
#define htole32(x)	cpu_to_le32(x)
#endif
#ifndef htobe32
#define htobe32(x)	cpu_to_be32(x)
#endif
#ifndef htole16
#define htole16(x)	cpu_to_le16(x)
#endif
#ifndef htobe16
#define htobe16(x)	cpu_to_be16(x)
#endif
204 | + | |
/* older kernels don't have these */

#include <asm/irq.h>
#if !defined(IRQ_NONE) && !defined(IRQ_RETVAL)
/* pre-irqreturn_t kernels: make the 2.6 IRQ return macros vanish */
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_WAKE_THREAD
#define IRQ_RETVAL
#define irqreturn_t void
typedef irqreturn_t (*irq_handler_t)(int irq, void *arg, struct pt_regs *regs);
#endif
#ifndef IRQF_SHARED
#define IRQF_SHARED	SA_SHIRQ
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * NOTE(review): unlike the real strlcpy this does not return
 * strlen(src) and always truncates silently; callers in OCF ignore the
 * return value, so the divergence is harmless here.
 */
# define strlcpy(dest,src,len) \
		({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
#endif

#ifndef MAX_ERRNO
#define MAX_ERRNO	4095
#endif
#ifndef IS_ERR_VALUE
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,5)
#include <linux/err.h>
#endif
/* fallback for kernels whose err.h does not provide IS_ERR_VALUE */
#ifndef IS_ERR_VALUE
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
#endif
#endif
236 | + | |
/*
 * common debug for all
 */
#if 1
/* each OCF module supplies its own "debug" flag that gates dprintk() */
#define dprintk(a...)	do { if (debug) printk(a); } while(0)
#else
#define dprintk(a...)
#endif

#ifndef SLAB_ATOMIC
/* Changed in 2.6.20, must use GFP_ATOMIC now */
#define	SLAB_ATOMIC	GFP_ATOMIC
#endif

/*
 * need some additional support for older kernels:
 * pci_register_driver() changed its return convention over time --
 * early kernels returned the number of devices bound (0 meaning none,
 * which we map to -ENODEV), later ones return 0/-errno directly.
 * Normalise "rc" to 0/-errno in every case.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
#define pci_register_driver_compat(driver, rc) \
	do { \
		if ((rc) > 0) { \
			(rc) = 0; \
		} else if (rc == 0) { \
			(rc) = -ENODEV; \
		} else { \
			pci_unregister_driver(driver); \
		} \
	} while (0)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
#define pci_register_driver_compat(driver,rc)	((rc) = (rc) < 0 ? (rc) : 0)
#else
#define pci_register_driver_compat(driver,rc)
#endif
269 | + | |
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)

#include <linux/mm.h>
#include <asm/scatterlist.h>

/* pre-2.6.24 kernels expose the scatterlist fields directly */
static inline void sg_set_page(struct scatterlist *sg,  struct page *page,
			       unsigned int len, unsigned int offset)
{
	sg->page = page;
	sg->offset = offset;
	sg->length = len;
}

static inline void *sg_virt(struct scatterlist *sg)
{
	return page_address(sg->page) + sg->offset;
}

/* no chained scatterlists before 2.6.24: init/mark-end are no-ops */
#define sg_init_table(sg, n)

#define sg_mark_end(sg)

#endif

#ifndef late_initcall
#define late_initcall(init) module_init(init)
#endif

/* uniprocessor or very old kernels: iterate over CPU 0 only */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) || !defined(CONFIG_SMP)
#define ocf_for_each_cpu(cpu) for ((cpu) = 0; (cpu) == 0; (cpu)++)
#else
#define ocf_for_each_cpu(cpu) for_each_present_cpu(cpu)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#include <linux/sched.h>
/* kill_proc() was removed in 2.6.27; emulate it with send_sig() */
#define	kill_proc(p,s,v)	send_sig(s,find_task_by_vpid(p),0)
#endif
308 | + | |
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)

/*
 * Minimal kthread_* emulation for pre-2.6.4 kernels, built on
 * kernel_thread()/daemonize().
 */
struct ocf_thread {
	struct task_struct	*task;	/* published by the started thread */
	int			(*func)(void *arg);
	void			*arg;
};

/* thread startup helper func */
static inline int ocf_run_thread(void *arg)
{
	struct ocf_thread *t = (struct ocf_thread *) arg;
	if (!t)
		return -1; /* very bad */
	t->task = current;
	daemonize();
	spin_lock_irq(&current->sigmask_lock);
	sigemptyset(&current->blocked);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
	return (*t->func)(t->arg);
}

/*
 * Spawn a thread and wait until it has published its task_struct.
 * NOTE(review): "t" lives on the caller's stack; the wait loop is what
 * makes it safe for the new thread to read it.
 */
#define kthread_create(f,a,fmt...) \
	({ \
		struct ocf_thread t; \
		pid_t p; \
		t.task = NULL; \
		t.func = (f); \
		t.arg = (a); \
		p = kernel_thread(ocf_run_thread, &t, CLONE_FS|CLONE_FILES); \
		while (p != (pid_t) -1 && t.task == NULL) \
			schedule(); \
		if (t.task) \
			snprintf(t.task->comm, sizeof(t.task->comm), fmt); \
		(t.task); \
	})

#define kthread_bind(t,cpu)	/**/

/* stop is signalled by renaming the thread (see kthread_stop below) */
#define kthread_should_stop()	(strcmp(current->comm, "stopping") == 0)

/* rename the thread, then SIGTERM it until the pid is gone */
#define kthread_stop(t) \
	({ \
		strcpy((t)->comm, "stopping"); \
		kill_proc((t)->pid, SIGTERM, 1); \
		do { \
			schedule(); \
		} while (kill_proc((t)->pid, SIGTERM, 1) == 0); \
	})

#else
#include <linux/kthread.h>
#endif


/* sk_buff page fragments gained an accessor in 3.2 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
#define	skb_frag_page(x)	((x)->page)
#endif

#endif /* __KERNEL__ */
370 | + | |
371 | +/****************************************************************************/ | |
372 | +#endif /* _BSD_COMPAT_H_ */ |
crypto/ocf/ocfnull/Makefile
crypto/ocf/ocfnull/ocfnull.c
1 | +/* | |
2 | + * An OCF module for determining the cost of crypto versus the cost of | |
3 | + * IPSec processing outside of OCF. This modules gives us the effect of | |
4 | + * zero cost encryption, of course you will need to run it at both ends | |
5 | + * since it does no crypto at all. | |
6 | + * | |
7 | + * Written by David McCullough <david_mccullough@mcafee.com> | |
8 | + * Copyright (C) 2006-2010 David McCullough | |
9 | + * | |
10 | + * LICENSE TERMS | |
11 | + * | |
12 | + * The free distribution and use of this software in both source and binary | |
13 | + * form is allowed (with or without changes) provided that: | |
14 | + * | |
15 | + * 1. distributions of this source code include the above copyright | |
16 | + * notice, this list of conditions and the following disclaimer; | |
17 | + * | |
18 | + * 2. distributions in binary form include the above copyright | |
19 | + * notice, this list of conditions and the following disclaimer | |
20 | + * in the documentation and/or other associated materials; | |
21 | + * | |
22 | + * 3. the copyright holder's name is not used to endorse products | |
23 | + * built using this software without specific written permission. | |
24 | + * | |
25 | + * ALTERNATIVELY, provided that this notice is retained in full, this product | |
26 | + * may be distributed under the terms of the GNU General Public License (GPL), | |
27 | + * in which case the provisions of the GPL apply INSTEAD OF those given above. | |
28 | + * | |
29 | + * DISCLAIMER | |
30 | + * | |
31 | + * This software is provided 'as is' with no explicit or implied warranties | |
32 | + * in respect of its properties, including, but not limited to, correctness | |
33 | + * and/or fitness for purpose. | |
34 | + */ | |
35 | + | |
36 | +#include <linux/version.h> | |
37 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
38 | +#include <linux/config.h> | |
39 | +#endif | |
40 | +#include <linux/module.h> | |
41 | +#include <linux/init.h> | |
42 | +#include <linux/list.h> | |
43 | +#include <linux/slab.h> | |
44 | +#include <linux/sched.h> | |
45 | +#include <linux/wait.h> | |
46 | +#include <linux/crypto.h> | |
47 | +#include <linux/interrupt.h> | |
48 | + | |
49 | +#include <cryptodev.h> | |
50 | +#include <uio.h> | |
51 | + | |
/* OCF driver id from crypto_get_driverid(); -1 while unregistered. */
static int32_t null_id = -1;
/* Monotonic session-number allocator; 0 is reserved as "invalid". */
static u_int32_t null_sesnum = 0;

static int null_process(device_t, struct cryptop *, int);
static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
static int null_freesession(device_t, u_int64_t);

/* route the shared dprintk()-style debug macro to our module param */
#define debug ocfnull_debug
int ocfnull_debug = 0;
module_param(ocfnull_debug, int, 0644);
MODULE_PARM_DESC(ocfnull_debug, "Enable debug");

/*
 * dummy device structure
 */

static struct {
	softc_device_decl sc_dev;
} nulldev;

/* OCF entry points exported by this (do-nothing) driver. */
static device_method_t null_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession, null_newsession),
	DEVMETHOD(cryptodev_freesession,null_freesession),
	DEVMETHOD(cryptodev_process, null_process),
};
78 | + | |
79 | +/* | |
80 | + * Generate a new software session. | |
81 | + */ | |
82 | +static int | |
83 | +null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri) | |
84 | +{ | |
85 | + dprintk("%s()\n", __FUNCTION__); | |
86 | + if (sid == NULL || cri == NULL) { | |
87 | + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__); | |
88 | + return EINVAL; | |
89 | + } | |
90 | + | |
91 | + if (null_sesnum == 0) | |
92 | + null_sesnum++; | |
93 | + *sid = null_sesnum++; | |
94 | + return 0; | |
95 | +} | |
96 | + | |
97 | + | |
98 | +/* | |
99 | + * Free a session. | |
100 | + */ | |
101 | +static int | |
102 | +null_freesession(device_t arg, u_int64_t tid) | |
103 | +{ | |
104 | + u_int32_t sid = CRYPTO_SESID2LID(tid); | |
105 | + | |
106 | + dprintk("%s()\n", __FUNCTION__); | |
107 | + if (sid > null_sesnum) { | |
108 | + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); | |
109 | + return EINVAL; | |
110 | + } | |
111 | + | |
112 | + /* Silently accept and return */ | |
113 | + if (sid == 0) | |
114 | + return 0; | |
115 | + return 0; | |
116 | +} | |
117 | + | |
118 | + | |
119 | +/* | |
120 | + * Process a request. | |
121 | + */ | |
122 | +static int | |
123 | +null_process(device_t arg, struct cryptop *crp, int hint) | |
124 | +{ | |
125 | + unsigned int lid; | |
126 | + | |
127 | + dprintk("%s()\n", __FUNCTION__); | |
128 | + | |
129 | + /* Sanity check */ | |
130 | + if (crp == NULL) { | |
131 | + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); | |
132 | + return EINVAL; | |
133 | + } | |
134 | + | |
135 | + crp->crp_etype = 0; | |
136 | + | |
137 | + if (crp->crp_desc == NULL || crp->crp_buf == NULL) { | |
138 | + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); | |
139 | + crp->crp_etype = EINVAL; | |
140 | + goto done; | |
141 | + } | |
142 | + | |
143 | + /* | |
144 | + * find the session we are using | |
145 | + */ | |
146 | + | |
147 | + lid = crp->crp_sid & 0xffffffff; | |
148 | + if (lid >= null_sesnum || lid == 0) { | |
149 | + crp->crp_etype = ENOENT; | |
150 | + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__); | |
151 | + goto done; | |
152 | + } | |
153 | + | |
154 | +done: | |
155 | + crypto_done(crp); | |
156 | + return 0; | |
157 | +} | |
158 | + | |
159 | + | |
160 | +/* | |
161 | + * our driver startup and shutdown routines | |
162 | + */ | |
163 | + | |
164 | +static int | |
165 | +null_init(void) | |
166 | +{ | |
167 | + dprintk("%s(%p)\n", __FUNCTION__, null_init); | |
168 | + | |
169 | + memset(&nulldev, 0, sizeof(nulldev)); | |
170 | + softc_device_init(&nulldev, "ocfnull", 0, null_methods); | |
171 | + | |
172 | + null_id = crypto_get_driverid(softc_get_device(&nulldev), | |
173 | + CRYPTOCAP_F_HARDWARE); | |
174 | + if (null_id < 0) | |
175 | + panic("ocfnull: crypto device cannot initialize!"); | |
176 | + | |
177 | +#define REGISTER(alg) \ | |
178 | + crypto_register(null_id,alg,0,0) | |
179 | + REGISTER(CRYPTO_DES_CBC); | |
180 | + REGISTER(CRYPTO_3DES_CBC); | |
181 | + REGISTER(CRYPTO_RIJNDAEL128_CBC); | |
182 | + REGISTER(CRYPTO_MD5); | |
183 | + REGISTER(CRYPTO_SHA1); | |
184 | + REGISTER(CRYPTO_MD5_HMAC); | |
185 | + REGISTER(CRYPTO_SHA1_HMAC); | |
186 | +#undef REGISTER | |
187 | + | |
188 | + return 0; | |
189 | +} | |
190 | + | |
191 | +static void | |
192 | +null_exit(void) | |
193 | +{ | |
194 | + dprintk("%s()\n", __FUNCTION__); | |
195 | + crypto_unregister_all(null_id); | |
196 | + null_id = -1; | |
197 | +} | |
198 | + | |
/* standard module plumbing and metadata */
module_init(null_init);
module_exit(null_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
crypto/ocf/random.c
1 | +/* | |
2 | + * A system independant way of adding entropy to the kernels pool | |
3 | + * this way the drivers can focus on the real work and we can take | |
4 | + * care of pushing it to the appropriate place in the kernel. | |
5 | + * | |
6 | + * This should be fast and callable from timers/interrupts | |
7 | + * | |
8 | + * Written by David McCullough <david_mccullough@mcafee.com> | |
9 | + * Copyright (C) 2006-2010 David McCullough | |
10 | + * Copyright (C) 2004-2005 Intel Corporation. | |
11 | + * | |
12 | + * LICENSE TERMS | |
13 | + * | |
14 | + * The free distribution and use of this software in both source and binary | |
15 | + * form is allowed (with or without changes) provided that: | |
16 | + * | |
17 | + * 1. distributions of this source code include the above copyright | |
18 | + * notice, this list of conditions and the following disclaimer; | |
19 | + * | |
20 | + * 2. distributions in binary form include the above copyright | |
21 | + * notice, this list of conditions and the following disclaimer | |
22 | + * in the documentation and/or other associated materials; | |
23 | + * | |
24 | + * 3. the copyright holder's name is not used to endorse products | |
25 | + * built using this software without specific written permission. | |
26 | + * | |
27 | + * ALTERNATIVELY, provided that this notice is retained in full, this product | |
28 | + * may be distributed under the terms of the GNU General Public License (GPL), | |
29 | + * in which case the provisions of the GPL apply INSTEAD OF those given above. | |
30 | + * | |
31 | + * DISCLAIMER | |
32 | + * | |
33 | + * This software is provided 'as is' with no explicit or implied warranties | |
34 | + * in respect of its properties, including, but not limited to, correctness | |
35 | + * and/or fitness for purpose. | |
36 | + */ | |
37 | + | |
38 | +#include <linux/version.h> | |
39 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
40 | +#include <linux/config.h> | |
41 | +#endif | |
42 | +#include <linux/module.h> | |
43 | +#include <linux/init.h> | |
44 | +#include <linux/list.h> | |
45 | +#include <linux/slab.h> | |
46 | +#include <linux/wait.h> | |
47 | +#include <linux/sched.h> | |
48 | +#include <linux/spinlock.h> | |
49 | +#include <linux/unistd.h> | |
50 | +#include <linux/poll.h> | |
51 | +#include <linux/random.h> | |
52 | +#include <cryptodev.h> | |
53 | + | |
54 | +#ifdef CONFIG_OCF_FIPS | |
55 | +#include "rndtest.h" | |
56 | +#endif | |
57 | + | |
58 | +#ifndef HAS_RANDOM_INPUT_WAIT | |
59 | +#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches" | |
60 | +#endif | |
61 | + | |
/*
 * a hack to access the debug levels from the crypto driver
 */
extern int crypto_debug;
#define debug crypto_debug

/*
 * a list of all registered random providers
 */
static LIST_HEAD(random_ops);
static int started = 0;		/* harvest thread is running */
static int initted = 0;		/* random_lock has been initialized */

/* One registered RNG source: owning driver id, read callback, cookie. */
struct random_op {
	struct list_head random_list;
	u_int32_t driverid;
	int (*read_random)(void *arg, u_int32_t *buf, int len);
	void *arg;
};

static int random_proc(void *arg);

/* pid of the harvest thread, or -1 when it is not running */
static pid_t randomproc = (pid_t) -1;
/*
 * NOTE(review): this lock is initialized lazily by crypto_random_init();
 * a statically initialized spinlock would remove any first-use race --
 * confirm the minimum supported kernel before changing.
 */
static spinlock_t random_lock;
86 | + | |
87 | +/* | |
88 | + * just init the spin locks | |
89 | + */ | |
90 | +static int | |
91 | +crypto_random_init(void) | |
92 | +{ | |
93 | + spin_lock_init(&random_lock); | |
94 | + initted = 1; | |
95 | + return(0); | |
96 | +} | |
97 | + | |
/*
 * Add the given random reader to our list (if not present)
 * and start the thread (if not already started)
 *
 * we have to assume that driver id is ok for now
 *
 * Returns 0 on success, EEXIST if this (driverid, callback) pair is
 * already registered, ENOMEM on allocation failure, or the failed
 * kernel_thread() result if the harvest thread could not be started.
 */
int
crypto_rregister(
	u_int32_t driverid,
	int (*read_random)(void *arg, u_int32_t *buf, int len),
	void *arg)
{
	unsigned long flags;
	int ret = 0;
	struct random_op *rops, *tmp;

	dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
			__FUNCTION__, driverid, read_random, arg);

	if (!initted)
		crypto_random_init();

#if 0
	struct cryptocap *cap;

	cap = crypto_checkdriver(driverid);
	if (!cap)
		return EINVAL;
#endif

	/*
	 * NOTE(review): this duplicate scan walks random_ops without
	 * holding random_lock, so it can race with a concurrent
	 * register/unregister -- confirm callers serialize registration.
	 */
	list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
		if (rops->driverid == driverid && rops->read_random == read_random)
			return EEXIST;
	}

	rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
	if (!rops)
		return ENOMEM;

	rops->driverid = driverid;
	rops->read_random = read_random;
	rops->arg = arg;

	/*
	 * Publish the new provider; the first registration also starts
	 * the harvest thread.
	 * NOTE(review): kernel_thread() is invoked with a spinlock held
	 * and interrupts disabled -- verify it cannot sleep on the
	 * targeted kernels.
	 */
	spin_lock_irqsave(&random_lock, flags);
	list_add_tail(&rops->random_list, &random_ops);
	if (!started) {
		randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
		if (randomproc < 0) {
			ret = randomproc;
			printk("crypto: crypto_rregister cannot start random thread; "
					"error %d", ret);
		} else
			started = 1;
	}
	spin_unlock_irqrestore(&random_lock, flags);

	return ret;
}
EXPORT_SYMBOL(crypto_rregister);
157 | + | |
/*
 * Remove every random provider registered under driverid, and signal
 * the harvest thread to exit if no providers remain.  Always returns 0.
 */
int
crypto_runregister_all(u_int32_t driverid)
{
	struct random_op *rops, *tmp;
	unsigned long flags;

	dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);

	/*
	 * NOTE(review): the list is pruned here without random_lock held;
	 * only the thread-kill decision below is locked.  Confirm this
	 * cannot race with random_proc()'s own list traversal.
	 */
	list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
		if (rops->driverid == driverid) {
			list_del(&rops->random_list);
			kfree(rops);
		}
	}

	/* if that was the last provider, tell the harvest thread to die */
	spin_lock_irqsave(&random_lock, flags);
	if (list_empty(&random_ops) && started)
		kill_proc(randomproc, SIGKILL, 1);
	spin_unlock_irqrestore(&random_lock, flags);
	return(0);
}
EXPORT_SYMBOL(crypto_runregister_all);
180 | + | |
/*
 * while we can add entropy to random.c continue to read random data from
 * the drivers and push it to random.
 *
 * Runs as a kernel thread started by crypto_rregister(); exits when the
 * provider list becomes empty or the buffer allocation fails.
 */
static int
random_proc(void *arg)
{
	int n;
	int wantcnt;		/* ints requested by /dev/random */
	int bufcnt = 0;		/* ints currently buffered */
	int retval = 0;
	int *buf = NULL;

	/* detach from the parent; accept SIGKILL so we can be stopped */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	daemonize();
	spin_lock_irq(&current->sigmask_lock);
	sigemptyset(&current->blocked);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
	sprintf(current->comm, "ocf-random");
#else
	daemonize("ocf-random");
	allow_signal(SIGKILL);
#endif

	(void) get_fs();
	set_fs(get_ds());

	/* in FIPS mode the buffer must hold one full rndtest block */
#ifdef CONFIG_OCF_FIPS
#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
#else
#define NUM_INT 32
#endif

	/*
	 * some devices can transferr their RNG data direct into memory,
	 * so make sure it is device friendly
	 */
	buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
	if (NULL == buf) {
		printk("crypto: RNG could not allocate memory\n");
		retval = -ENOMEM;
		goto bad_alloc;
	}

	wantcnt = NUM_INT;   /* start by adding some entropy */

	/*
	 * its possible due to errors or driver removal that we no longer
	 * have anything to do, if so exit or we will consume all the CPU
	 * doing nothing
	 */
	while (!list_empty(&random_ops)) {
		struct random_op *rops, *tmp;

#ifdef CONFIG_OCF_FIPS
		if (wantcnt)
			wantcnt = NUM_INT; /* FIPs mode can do 20000 bits or none */
#endif

		/* see if we can get enough entropy to make the world
		 * a better place.
		 */
		while (bufcnt < wantcnt && bufcnt < NUM_INT) {
			/*
			 * NOTE(review): the provider list is walked and
			 * pruned here without random_lock -- confirm this
			 * cannot race with crypto_runregister_all().
			 */
			list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {

				n = (*rops->read_random)(rops->arg, &buf[bufcnt],
						NUM_INT - bufcnt);

				/* on failure remove the random number generator */
				if (n == -1) {
					list_del(&rops->random_list);
					printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
							rops->driverid);
					kfree(rops);
				} else if (n > 0)
					bufcnt += n;
			}
			/* give up CPU for a bit, just in case as this is a loop */
			schedule();
		}


		/* in FIPS mode, throw the whole buffer away on any test failure */
#ifdef CONFIG_OCF_FIPS
		if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
			dprintk("crypto: buffer had fips errors, discarding\n");
			bufcnt = 0;
		}
#endif

		/*
		 * if we have a certified buffer, we can send some data
		 * to /dev/random and move along
		 */
		if (bufcnt > 0) {
			/* add what we have */
			random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
			bufcnt = 0;
		}

		/* give up CPU for a bit so we don't hog while filling */
		schedule();

		/* wait for needing more */
		wantcnt = random_input_wait();

		if (wantcnt <= 0)
			wantcnt = 0; /* try to get some info again */
		else
			/* round up to one word or we can loop forever */
			wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
		if (wantcnt > NUM_INT) {
			wantcnt = NUM_INT;
		}

		/* a pending signal is the stop request from runregister_all() */
		if (signal_pending(current)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
			spin_lock_irq(&current->sigmask_lock);
#endif
			flush_signals(current);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
			spin_unlock_irq(&current->sigmask_lock);
#endif
		}
	}

	kfree(buf);

bad_alloc:
	/* mark the thread gone so a later registration can restart it */
	spin_lock_irq(&random_lock);
	randomproc = (pid_t) -1;
	started = 0;
	spin_unlock_irq(&random_lock);

	return retval;
}
crypto/ocf/rndtest.c
1 | +/* $OpenBSD$ */ | |
2 | + | |
3 | +/* | |
4 | + * OCF/Linux port done by David McCullough <david_mccullough@mcafee.com> | |
5 | + * Copyright (C) 2006-2010 David McCullough | |
6 | + * Copyright (C) 2004-2005 Intel Corporation. | |
7 | + * The license and original author are listed below. | |
8 | + * | |
9 | + * Copyright (c) 2002 Jason L. Wright (jason@thought.net) | |
10 | + * All rights reserved. | |
11 | + * | |
12 | + * Redistribution and use in source and binary forms, with or without | |
13 | + * modification, are permitted provided that the following conditions | |
14 | + * are met: | |
15 | + * 1. Redistributions of source code must retain the above copyright | |
16 | + * notice, this list of conditions and the following disclaimer. | |
17 | + * 2. Redistributions in binary form must reproduce the above copyright | |
18 | + * notice, this list of conditions and the following disclaimer in the | |
19 | + * documentation and/or other materials provided with the distribution. | |
20 | + * 3. All advertising materials mentioning features or use of this software | |
21 | + * must display the following acknowledgement: | |
22 | + * This product includes software developed by Jason L. Wright | |
23 | + * 4. The name of the author may not be used to endorse or promote products | |
24 | + * derived from this software without specific prior written permission. | |
25 | + * | |
26 | + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
27 | + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |
28 | + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |
29 | + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, | |
30 | + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |
31 | + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | |
32 | + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
33 | + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | |
34 | + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | |
35 | + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
36 | + * POSSIBILITY OF SUCH DAMAGE. | |
37 | + */ | |
38 | + | |
39 | +#include <linux/version.h> | |
40 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED) | |
41 | +#include <linux/config.h> | |
42 | +#endif | |
43 | +#include <linux/module.h> | |
44 | +#include <linux/list.h> | |
45 | +#include <linux/wait.h> | |
46 | +#include <linux/time.h> | |
47 | +#include <linux/unistd.h> | |
48 | +#include <linux/kernel.h> | |
49 | +#include <linux/string.h> | |
50 | +#include <linux/time.h> | |
51 | +#include <cryptodev.h> | |
52 | +#include "rndtest.h" | |
53 | + | |
/* Cumulative pass/fail statistics across all rndtest_test() runs. */
static struct rndtest_stats rndstats;

static void rndtest_test(struct rndtest_state *);

/* The tests themselves */
static int rndtest_monobit(struct rndtest_state *);
static int rndtest_runs(struct rndtest_state *);
static int rndtest_longruns(struct rndtest_state *);
static int rndtest_chi_4(struct rndtest_state *);

static int rndtest_runs_check(struct rndtest_state *, int, int *);
static void rndtest_runs_record(struct rndtest_state *, int, int *);

/* Table of the statistical tests applied to every candidate buffer. */
static const struct rndtest_testfunc {
	int (*test)(struct rndtest_state *);
} rndtest_funcs[] = {
	{ rndtest_monobit },
	{ rndtest_runs },
	{ rndtest_chi_4 },
	{ rndtest_longruns },
};

#define RNDTEST_NTESTS (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
77 | + | |
78 | +static void | |
79 | +rndtest_test(struct rndtest_state *rsp) | |
80 | +{ | |
81 | + int i, rv = 0; | |
82 | + | |
83 | + rndstats.rst_tests++; | |
84 | + for (i = 0; i < RNDTEST_NTESTS; i++) | |
85 | + rv |= (*rndtest_funcs[i].test)(rsp); | |
86 | + rsp->rs_discard = (rv != 0); | |
87 | +} | |
88 | + | |
89 | + | |
/* crypto.c's debug flag doubles as our verbosity control */
extern int crypto_debug;
#define rndtest_verbose 2
/* Log failures always; log passes only when crypto debugging is on. */
#define rndtest_report(rsp, failure, fmt, a...) \
	{ if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }

/* Monobit bounds: acceptable count of 1-bits in the 20000-bit sample. */
#define RNDTEST_MONOBIT_MINONES	9725
#define RNDTEST_MONOBIT_MAXONES	10275
97 | + | |
98 | +static int | |
99 | +rndtest_monobit(struct rndtest_state *rsp) | |
100 | +{ | |
101 | + int i, ones = 0, j; | |
102 | + u_int8_t r; | |
103 | + | |
104 | + for (i = 0; i < RNDTEST_NBYTES; i++) { | |
105 | + r = rsp->rs_buf[i]; | |
106 | + for (j = 0; j < 8; j++, r <<= 1) | |
107 | + if (r & 0x80) | |
108 | + ones++; | |
109 | + } | |
110 | + if (ones > RNDTEST_MONOBIT_MINONES && | |
111 | + ones < RNDTEST_MONOBIT_MAXONES) { | |
112 | + if (rndtest_verbose > 1) | |
113 | + rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)", | |
114 | + RNDTEST_MONOBIT_MINONES, ones, | |
115 | + RNDTEST_MONOBIT_MAXONES); | |
116 | + return (0); | |
117 | + } else { | |
118 | + if (rndtest_verbose) | |
119 | + rndtest_report(rsp, 1, | |
120 | + "monobit failed (%d ones)", ones); | |
121 | + rndstats.rst_monobit++; | |
122 | + return (-1); | |
123 | + } | |
124 | +} | |
125 | + | |
/* Runs are bucketed by length 1..6; lengths >= 6 share the last bucket. */
#define RNDTEST_RUNS_NINTERVAL 6

/* Acceptable count of runs for each length bucket. */
static const struct rndtest_runs_tabs {
	u_int16_t min, max;
} rndtest_runs_tab[] = {
	{ 2343, 2657 },
	{ 1135, 1365 },
	{ 542, 708 },
	{ 251, 373 },
	{ 111, 201 },
	{ 111, 201 },
};
138 | + | |
139 | +static int | |
140 | +rndtest_runs(struct rndtest_state *rsp) | |
141 | +{ | |
142 | + int i, j, ones, zeros, rv = 0; | |
143 | + int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL]; | |
144 | + u_int8_t c; | |
145 | + | |
146 | + bzero(onei, sizeof(onei)); | |
147 | + bzero(zeroi, sizeof(zeroi)); | |
148 | + ones = zeros = 0; | |
149 | + for (i = 0; i < RNDTEST_NBYTES; i++) { | |
150 | + c = rsp->rs_buf[i]; | |
151 | + for (j = 0; j < 8; j++, c <<= 1) { | |
152 | + if (c & 0x80) { | |
153 | + ones++; | |
154 | + rndtest_runs_record(rsp, zeros, zeroi); | |
155 | + zeros = 0; | |
156 | + } else { | |
157 | + zeros++; | |
158 | + rndtest_runs_record(rsp, ones, onei); | |
159 | + ones = 0; | |
160 | + } | |
161 | + } | |
162 | + } | |
163 | + rndtest_runs_record(rsp, ones, onei); | |
164 | + rndtest_runs_record(rsp, zeros, zeroi); | |
165 | + | |
166 | + rv |= rndtest_runs_check(rsp, 0, zeroi); | |
167 | + rv |= rndtest_runs_check(rsp, 1, onei); | |
168 | + | |
169 | + if (rv) | |
170 | + rndstats.rst_runs++; | |
171 | + | |
172 | + return (rv); | |
173 | +} | |
174 | + | |
175 | +static void | |
176 | +rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv) | |
177 | +{ | |
178 | + if (len == 0) | |
179 | + return; | |
180 | + if (len > RNDTEST_RUNS_NINTERVAL) | |
181 | + len = RNDTEST_RUNS_NINTERVAL; | |
182 | + len -= 1; | |
183 | + intrv[len]++; | |
184 | +} | |
185 | + | |
186 | +static int | |
187 | +rndtest_runs_check(struct rndtest_state *rsp, int val, int *src) | |
188 | +{ | |
189 | + int i, rv = 0; | |
190 | + | |
191 | + for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) { | |
192 | + if (src[i] < rndtest_runs_tab[i].min || | |
193 | + src[i] > rndtest_runs_tab[i].max) { | |
194 | + rndtest_report(rsp, 1, | |
195 | + "%s interval %d failed (%d, %d-%d)", | |
196 | + val ? "ones" : "zeros", | |
197 | + i + 1, src[i], rndtest_runs_tab[i].min, | |
198 | + rndtest_runs_tab[i].max); | |
199 | + rv = -1; | |
200 | + } else { | |
201 | + rndtest_report(rsp, 0, | |
202 | + "runs pass %s interval %d (%d < %d < %d)", | |
203 | + val ? "ones" : "zeros", | |
204 | + i + 1, rndtest_runs_tab[i].min, src[i], | |
205 | + rndtest_runs_tab[i].max); | |
206 | + } | |
207 | + } | |
208 | + return (rv); | |
209 | +} | |
210 | + | |
211 | +static int | |
212 | +rndtest_longruns(struct rndtest_state *rsp) | |
213 | +{ | |
214 | + int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0; | |
215 | + u_int8_t c; | |
216 | + | |
217 | + for (i = 0; i < RNDTEST_NBYTES; i++) { | |
218 | + c = rsp->rs_buf[i]; | |
219 | + for (j = 0; j < 8; j++, c <<= 1) { | |
220 | + if (c & 0x80) { | |
221 | + zeros = 0; | |
222 | + ones++; | |
223 | + if (ones > maxones) | |
224 | + maxones = ones; | |
225 | + } else { | |
226 | + ones = 0; | |
227 | + zeros++; | |
228 | + if (zeros > maxzeros) | |
229 | + maxzeros = zeros; | |
230 | + } | |
231 | + } | |
232 | + } | |
233 | + | |
234 | + if (maxones < 26 && maxzeros < 26) { | |
235 | + rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)", | |
236 | + maxones, maxzeros); | |
237 | + return (0); | |
238 | + } else { | |
239 | + rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)", | |
240 | + maxones, maxzeros); | |
241 | + rndstats.rst_longruns++; | |
242 | + return (-1); | |
243 | + } | |
244 | +} | |
245 | + | |
/*
 * chi^2 test over 4 bits: (this is called the poker test in FIPS 140-2,
 * but it is really the chi^2 test over 4 bits (the poker test as described
 * by Knuth vol 2 is something different, and I take him as authoritative
 * on nomenclature over NIST).
 */
#define RNDTEST_CHI4_K	16
#define RNDTEST_CHI4_K_MASK	(RNDTEST_CHI4_K - 1)

/*
 * The unnormalized values are used so that we don't have to worry about
 * fractional precision.  The "real" value is found by:
 *	(V - 1562500) * (16 / 5000) = Vn   (where V is the unnormalized value)
 */
#define RNDTEST_CHI4_VMIN	1563181 /* 2.1792 */
#define RNDTEST_CHI4_VMAX	1576929 /* 46.1728 */
262 | + | |
263 | +static int | |
264 | +rndtest_chi_4(struct rndtest_state *rsp) | |
265 | +{ | |
266 | + unsigned int freq[RNDTEST_CHI4_K], i, sum; | |
267 | + | |
268 | + for (i = 0; i < RNDTEST_CHI4_K; i++) | |
269 | + freq[i] = 0; | |
270 | + | |
271 | + /* Get number of occurances of each 4 bit pattern */ | |
272 | + for (i = 0; i < RNDTEST_NBYTES; i++) { | |
273 | + freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++; | |
274 | + freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++; | |
275 | + } | |
276 | + | |
277 | + for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++) | |
278 | + sum += freq[i] * freq[i]; | |
279 | + | |
280 | + if (sum >= 1563181 && sum <= 1576929) { | |
281 | + rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum); | |
282 | + return (0); | |
283 | + } else { | |
284 | + rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum); | |
285 | + rndstats.rst_chi++; | |
286 | + return (-1); | |
287 | + } | |
288 | +} | |
289 | + | |
290 | +int | |
291 | +rndtest_buf(unsigned char *buf) | |
292 | +{ | |
293 | + struct rndtest_state rsp; | |
294 | + | |
295 | + memset(&rsp, 0, sizeof(rsp)); | |
296 | + rsp.rs_buf = buf; | |
297 | + rndtest_test(&rsp); | |
298 | + return(rsp.rs_discard); | |
299 | +} |
crypto/ocf/rndtest.h
1 | +/* $FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $ */ | |
2 | +/* $OpenBSD$ */ | |
3 | + | |
4 | +/* | |
5 | + * Copyright (c) 2002 Jason L. Wright (jason@thought.net) | |
6 | + * All rights reserved. | |
7 | + * | |
8 | + * Redistribution and use in source and binary forms, with or without | |
9 | + * modification, are permitted provided that the following conditions | |
10 | + * are met: | |
11 | + * 1. Redistributions of source code must retain the above copyright | |
12 | + * notice, this list of conditions and the following disclaimer. | |
13 | + * 2. Redistributions in binary form must reproduce the above copyright | |
14 | + * notice, this list of conditions and the following disclaimer in the | |
15 | + * documentation and/or other materials provided with the distribution. | |
16 | + * 3. All advertising materials mentioning features or use of this software | |
17 | + * must display the following acknowledgement: | |
18 | + * This product includes software developed by Jason L. Wright | |
19 | + * 4. The name of the author may not be used to endorse or promote products | |
20 | + * derived from this software without specific prior written permission. | |
21 | + * | |
22 | + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
23 | + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |
24 | + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |
25 | + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, | |
26 | + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |
27 | + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | |
28 | + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
29 | + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | |
30 | + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | |
31 | + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
32 | + * POSSIBILITY OF SUCH DAMAGE. | |
33 | + */ | |
34 | + | |
35 | + | |
36 | +/* Some of the tests depend on these values */ | |
37 | +#define RNDTEST_NBYTES 2500 | |
38 | +#define RNDTEST_NBITS (8 * RNDTEST_NBYTES) | |
39 | + | |
40 | +struct rndtest_state { | |
41 | + int rs_discard; /* discard/accept random data */ | |
42 | + u_int8_t *rs_buf; | |
43 | +}; | |
44 | + | |
45 | +struct rndtest_stats { | |
46 | + u_int32_t rst_discard; /* number of bytes discarded */ | |
47 | + u_int32_t rst_tests; /* number of test runs */ | |
48 | + u_int32_t rst_monobit; /* monobit test failures */ | |
49 | + u_int32_t rst_runs; /* 0/1 runs failures */ | |
50 | + u_int32_t rst_longruns; /* longruns failures */ | |
51 | + u_int32_t rst_chi; /* chi^2 failures */ | |
52 | +}; | |
53 | + | |
54 | +extern int rndtest_buf(unsigned char *buf); |
crypto/ocf/uio.h
1 | +#ifndef _OCF_UIO_H_ | |
2 | +#define _OCF_UIO_H_ | |
3 | + | |
4 | +#include <linux/uio.h> | |
5 | + | |
6 | +/* | |
7 | + * The linux uio.h doesn't have all we need. To be fully api compatible | |
8 | + * with the BSD cryptodev, we need to keep this around. Perhaps this can | |
9 | + * be moved back into the linux/uio.h | |
10 | + * | |
11 | + * Linux port done by David McCullough <david_mccullough@mcafee.com> | |
12 | + * Copyright (C) 2006-2010 David McCullough | |
13 | + * Copyright (C) 2004-2005 Intel Corporation. | |
14 | + * | |
15 | + * LICENSE TERMS | |
16 | + * | |
17 | + * The free distribution and use of this software in both source and binary | |
18 | + * form is allowed (with or without changes) provided that: | |
19 | + * | |
20 | + * 1. distributions of this source code include the above copyright | |
21 | + * notice, this list of conditions and the following disclaimer; | |
22 | + * | |
23 | + * 2. distributions in binary form include the above copyright | |
24 | + * notice, this list of conditions and the following disclaimer | |
25 | + * in the documentation and/or other associated materials; | |
26 | + * | |
27 | + * 3. the copyright holder's name is not used to endorse products | |
28 | + * built using this software without specific written permission. | |
29 | + * | |
30 | + * ALTERNATIVELY, provided that this notice is retained in full, this product | |
31 | + * may be distributed under the terms of the GNU General Public License (GPL), | |
32 | + * in which case the provisions of the GPL apply INSTEAD OF those given above. | |
33 | + * | |
34 | + * DISCLAIMER | |
35 | + * | |
36 | + * This software is provided 'as is' with no explicit or implied warranties | |
37 | + * in respect of its properties, including, but not limited to, correctness | |
38 | + * and/or fitness for purpose. | |
39 | + * --------------------------------------------------------------------------- | |
40 | + */ | |
41 | + | |
42 | +struct uio { | |
43 | + struct iovec *uio_iov; | |
44 | + int uio_iovcnt; | |
45 | + off_t uio_offset; | |
46 | + int uio_resid; | |
47 | +#if 0 | |
48 | + enum uio_seg uio_segflg; | |
49 | + enum uio_rw uio_rw; | |
50 | + struct thread *uio_td; | |
51 | +#endif | |
52 | +}; | |
53 | + | |
54 | +#endif |
drivers/char/random.c
... | ... | @@ -130,6 +130,9 @@ |
130 | 130 | * void add_interrupt_randomness(int irq); |
131 | 131 | * void add_disk_randomness(struct gendisk *disk); |
132 | 132 | * |
133 | + * void random_input_words(__u32 *buf, size_t wordcount, int ent_count) | |
134 | + * int random_input_wait(void); | |
135 | + * | |
133 | 136 | * add_input_randomness() uses the input layer interrupt timing, as well as |
134 | 137 | * the event type information from the hardware. |
135 | 138 | * |
... | ... | @@ -147,6 +150,13 @@ |
147 | 150 | * seek times do not make for good sources of entropy, as their seek |
148 | 151 | * times are usually fairly consistent. |
149 | 152 | * |
153 | + * random_input_words() just provides a raw block of entropy to the input | |
154 | + * pool, such as from a hardware entropy generator. | |
155 | + * | |
156 | + * random_input_wait() suspends the caller until such time as the | |
157 | + * entropy pool falls below the write threshold, and returns a count of how | |
158 | + * much entropy (in bits) is needed to sustain the pool. | |
159 | + * | |
150 | 160 | * All of these routines try to estimate how many bits of randomness a |
151 | 161 | * particular randomness source. They do this by keeping track of the |
152 | 162 | * first and second order deltas of the event timings. |
... | ... | @@ -721,6 +731,63 @@ |
721 | 731 | add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); |
722 | 732 | } |
723 | 733 | #endif |
734 | + | |
735 | +/* | |
736 | + * random_input_words - add bulk entropy to pool | |
737 | + * | |
738 | + * @buf: buffer to add | |
739 | + * @wordcount: number of __u32 words to add | |
740 | + * @ent_count: total amount of entropy (in bits) to credit | |
741 | + * | |
742 | + * this provides bulk input of entropy to the input pool | |
743 | + * | |
744 | + */ | |
745 | +void random_input_words(__u32 *buf, size_t wordcount, int ent_count) | |
746 | +{ | |
747 | + mix_pool_bytes(&input_pool, buf, wordcount*4); | |
748 | + | |
749 | + credit_entropy_bits(&input_pool, ent_count); | |
750 | + | |
751 | + DEBUG_ENT("crediting %d bits => %d\n", | |
752 | + ent_count, input_pool.entropy_count); | |
753 | + /* | |
754 | + * Wake up waiting processes if we have enough | |
755 | + * entropy. | |
756 | + */ | |
757 | + if (input_pool.entropy_count >= random_read_wakeup_thresh) | |
758 | + wake_up_interruptible(&random_read_wait); | |
759 | +} | |
760 | +EXPORT_SYMBOL(random_input_words); | |
761 | + | |
762 | +/* | |
763 | + * random_input_wait - wait until random needs entropy | |
764 | + * | |
765 | + * this function sleeps until the /dev/random subsystem actually | |
766 | + * needs more entropy, and then return the amount of entropy | |
767 | + * that it would be nice to have added to the system. | |
768 | + */ | |
769 | +int random_input_wait(void) | |
770 | +{ | |
771 | + int count; | |
772 | + | |
773 | + wait_event_interruptible(random_write_wait, | |
774 | + input_pool.entropy_count < random_write_wakeup_thresh); | |
775 | + | |
776 | + count = random_write_wakeup_thresh - input_pool.entropy_count; | |
777 | + | |
778 | + /* likely we got woken up due to a signal */ | |
779 | + if (count <= 0) count = random_read_wakeup_thresh; | |
780 | + | |
781 | + DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n", | |
782 | + count, | |
783 | + input_pool.entropy_count, random_write_wakeup_thresh); | |
784 | + | |
785 | + return count; | |
786 | +} | |
787 | +EXPORT_SYMBOL(random_input_wait); | |
788 | + | |
789 | + | |
790 | +#define EXTRACT_SIZE 10 | |
724 | 791 | |
725 | 792 | /********************************************************************* |
726 | 793 | * |
fs/fcntl.c
include/linux/miscdevice.h
... | ... | @@ -19,6 +19,7 @@ |
19 | 19 | #define APOLLO_MOUSE_MINOR 7 |
20 | 20 | #define PC110PAD_MINOR 9 |
21 | 21 | /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ |
22 | +#define CRYPTODEV_MINOR 70 /* /dev/crypto */ | |
22 | 23 | #define WATCHDOG_MINOR 130 /* Watchdog timer */ |
23 | 24 | #define TEMP_MINOR 131 /* Temperature Sensor */ |
24 | 25 | #define RTC_MINOR 135 |
include/linux/random.h
... | ... | @@ -34,6 +34,30 @@ |
34 | 34 | /* Clear the entropy pool and associated counters. (Superuser only.) */ |
35 | 35 | #define RNDCLEARPOOL _IO( 'R', 0x06 ) |
36 | 36 | |
37 | +#ifdef CONFIG_FIPS_RNG | |
38 | + | |
39 | +/* Size of seed value - equal to AES blocksize */ | |
40 | +#define AES_BLOCK_SIZE_BYTES 16 | |
41 | +#define SEED_SIZE_BYTES AES_BLOCK_SIZE_BYTES | |
42 | +/* Size of AES key */ | |
43 | +#define KEY_SIZE_BYTES 16 | |
44 | + | |
45 | +/* ioctl() structure used by FIPS 140-2 Tests */ | |
46 | +struct rand_fips_test { | |
47 | + unsigned char key[KEY_SIZE_BYTES]; /* Input */ | |
48 | + unsigned char datetime[SEED_SIZE_BYTES]; /* Input */ | |
49 | + unsigned char seed[SEED_SIZE_BYTES]; /* Input */ | |
50 | + unsigned char result[SEED_SIZE_BYTES]; /* Output */ | |
51 | +}; | |
52 | + | |
53 | +/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */ | |
54 | +#define RNDFIPSVST _IOWR('R', 0x10, struct rand_fips_test) | |
55 | + | |
56 | +/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */ | |
57 | +#define RNDFIPSMCT _IOWR('R', 0x11, struct rand_fips_test) | |
58 | + | |
59 | +#endif /* #ifdef CONFIG_FIPS_RNG */ | |
60 | + | |
37 | 61 | struct rand_pool_info { |
38 | 62 | int entropy_count; |
39 | 63 | int buf_size; |
... | ... | @@ -53,6 +77,10 @@ |
53 | 77 | extern void add_input_randomness(unsigned int type, unsigned int code, |
54 | 78 | unsigned int value); |
55 | 79 | extern void add_interrupt_randomness(int irq); |
80 | + | |
81 | +extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count); | |
82 | +extern int random_input_wait(void); | |
83 | +#define HAS_RANDOM_INPUT_WAIT 1 | |
56 | 84 | |
57 | 85 | extern void get_random_bytes(void *buf, int nbytes); |
58 | 86 | void generate_random_uuid(unsigned char uuid_out[16]); |